/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
/*	  All Rights Reserved						*/

/*	Copyright (c) 1987, 1988 Microsoft Corporation			*/
/*	  All Rights Reserved						*/

/*
 * Copyright 2012 Joyent, Inc. All rights reserved.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/signal.h>
#include <sys/systm.h>
#include <sys/class.h>
#include <sys/syscall.h>
#include <sys/cpuvar.h>
#include <sys/sysinfo.h>
#include <sys/fault.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/prsystm.h>
#include <sys/mutex_impl.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/avintr.h>
#include <vm/seg_kmem.h>
#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <sys/procfs.h>
#include <sys/reboot.h>
#include <sys/debug.h>
#include <sys/debugreg.h>
#include <sys/modctl.h>
#include <sys/aio_impl.h>
#include <sys/tnf_probe.h>
#include <sys/x86_archext.h>
#include <sys/copyops.h>
#include <sys/ftrace.h>
#include <sys/panic.h>
#include <sys/traptrace.h>
#include <sys/ontrap.h>
#include <sys/cpc_impl.h>
#include <sys/bootconf.h>
#include <sys/bootinfo.h>
#include <sys/promif.h>
#include <sys/mach_mmu.h>
#include <sys/hypervisor.h>
#include <sys/contract/process_impl.h>
#define	USER	0x10000		/* user-mode flag added to trap type */
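/*
 * In the switch in trap() below, traps taken from user mode are
 * distinguished by adding USER to the hardware trap number, e.g.
 * "case T_PGFLT + USER".
 */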
static const char *trap_type_mnemonic[] = {
	"de",	"db",	"2",	"bp",
	"of",	"br",	"ud",	"nm",
	"df",	"9",	"ts",	"np",
	"ss",	"gp",	"pf",	"15",
	"mf",	"ac",	"mc",	"xf"
};

static const char *trap_type[] = {
	"Divide error",				/* trap id 0	*/
	"Debug",				/* trap id 1	*/
	"NMI interrupt",			/* trap id 2	*/
	"Breakpoint",				/* trap id 3	*/
	"Overflow",				/* trap id 4	*/
	"BOUND range exceeded",			/* trap id 5	*/
	"Invalid opcode",			/* trap id 6	*/
	"Device not available",			/* trap id 7	*/
	"Double fault",				/* trap id 8	*/
	"Coprocessor segment overrun",		/* trap id 9	*/
	"Invalid TSS",				/* trap id 10	*/
	"Segment not present",			/* trap id 11	*/
	"Stack segment fault",			/* trap id 12	*/
	"General protection",			/* trap id 13	*/
	"Page fault",				/* trap id 14	*/
	"Reserved",				/* trap id 15	*/
	"x87 floating point error",		/* trap id 16	*/
	"Alignment check",			/* trap id 17	*/
	"Machine check",			/* trap id 18	*/
	"SIMD floating point exception",	/* trap id 19	*/
};
#define	TRAP_TYPES	(sizeof (trap_type) / sizeof (trap_type[0]))
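/*
 * TRAP_TYPES is the number of entries in the tables above (20, for trap
 * ids 0 through 19).  Both die() and showregs() compare a trap number
 * against it before indexing trap_type[] or trap_type_mnemonic[].
 */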
#define	SLOW_SCALL_SIZE	2
#define	FAST_SCALL_SIZE	2
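/*
 * Both the slow system call instruction (int $T_SYSCALLINT, 0xCD 0x91) and
 * the fast ones (sysenter 0x0F 0x34, syscall 0x0F 0x05) are two bytes long,
 * which is why rewrite_syscall() below can overwrite one with the other in
 * place.
 */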
#if defined(TRAPDEBUG) || defined(lint)
#endif	/* defined(TRAPDEBUG) || defined(lint) */
#if defined(TRAPTRACE)
/*
 * trap trace record for cpu0 is allocated here.
 * trap trace records for non-boot cpus are allocated in mp_startup_init().
 */
static trap_trace_rec_t trap_tr0[TRAPTR_NENT];
trap_trace_ctl_t trap_trace_ctl[NCPU] = {
	{
		(uintptr_t)trap_tr0,			/* next record */
		(uintptr_t)trap_tr0,			/* first record */
		(uintptr_t)(trap_tr0 + TRAPTR_NENT),	/* limit */
		(uintptr_t)0				/* current */
	},
};

/*
 * default trap buffer size
 */
size_t trap_trace_bufsize = TRAPTR_NENT * sizeof (trap_trace_rec_t);
int trap_trace_freeze = 0;
int trap_trace_off = 0;

/*
 * A dummy TRAPTRACE entry to use after death.
 */
trap_trace_rec_t trap_trace_postmort;

static void dump_ttrace(void);
#endif	/* TRAPTRACE */
static void dumpregs(struct regs *);
static void showregs(uint_t, struct regs *, caddr_t);
static int kern_gpfault(struct regs *);
static int
die(uint_t type, struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	struct panic_trap_info ti;
	const char *trap_name, *trap_mnemonic;

	if (type < TRAP_TYPES) {
		trap_name = trap_type[type];
		trap_mnemonic = trap_type_mnemonic[type];
	} else {
		trap_name = "trap";
		trap_mnemonic = "-";
	}

	ti.trap_type = type & ~USER;

	curthread->t_panic_trap = &ti;

	if (type == T_PGFLT && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x (#%s %s) rp=%p addr=%p "
		    "occurred in module \"%s\" due to %s",
		    type, trap_mnemonic, trap_name, (void *)rp, (void *)addr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x (#%s %s) rp=%p addr=%p",
		    type, trap_mnemonic, trap_name, (void *)rp, (void *)addr);
	}
}
/*
 * Rewrite the instruction at pc to be an int $T_SYSCALLINT instruction.
 *
 * int <vector> is two bytes: 0xCD <vector>
 */
static int
rewrite_syscall(caddr_t pc)
{
	uchar_t instr[SLOW_SCALL_SIZE] = { 0xCD, T_SYSCALLINT };

	if (uwrite(curthread->t_procp, instr, SLOW_SCALL_SIZE,
	    (uintptr_t)pc) != 0)
		return (1);

	return (0);
}
/*
 * Test to see if the instruction at pc is sysenter or syscall. The second
 * argument should be the x86 feature flag corresponding to the expected
 * instruction.
 *
 * sysenter is two bytes: 0x0F 0x34
 * syscall is two bytes: 0x0F 0x05
 * int $T_SYSCALLINT is two bytes: 0xCD 0x91
 */
static int
instr_is_other_syscall(caddr_t pc, int which)
{
	uchar_t instr[FAST_SCALL_SIZE];

	ASSERT(which == X86FSET_SEP || which == X86FSET_ASYSC || which == 0xCD);

	if (copyin_nowatch(pc, (caddr_t)instr, FAST_SCALL_SIZE) != 0)
		return (0);

	switch (which) {
	case X86FSET_SEP:
		if (instr[0] == 0x0F && instr[1] == 0x34)
			return (1);
		break;
	case X86FSET_ASYSC:
		if (instr[0] == 0x0F && instr[1] == 0x05)
			return (1);
		break;
	case 0xCD:
		if (instr[0] == 0xCD && instr[1] == T_SYSCALLINT)
			return (1);
		break;
	}

	return (0);
}
static const char *
syscall_insn_string(int syscall_insn)
{
	switch (syscall_insn) {
static int
ldt_rewrite_syscall(struct regs *rp, proc_t *p, int syscall_insn)
{
	caddr_t linearpc;
	int return_code = 0;

	mutex_enter(&p->p_ldtlock);	/* Must be held across linear_pc() */

	if (linear_pc(rp, p, &linearpc) == 0) {
		/*
		 * If another thread beat us here, it already changed
		 * this site to the slower (int) syscall instruction.
		 */
		if (instr_is_other_syscall(linearpc, 0xCD)) {
			return_code = 1;
		} else if (instr_is_other_syscall(linearpc, syscall_insn)) {
			if (rewrite_syscall(linearpc) == 0) {
				return_code = 1;
			} else {
				cmn_err(CE_WARN, "failed to rewrite %s "
				    "instruction in process %d",
				    syscall_insn_string(syscall_insn),
				    p->p_pid);
			}
		}
	}

	mutex_exit(&p->p_ldtlock);	/* Must be held across linear_pc() */

	return (return_code);
}
/*
 * Test to see if the instruction at pc is a system call instruction.
 *
 * The bytes of an lcall instruction used for the syscall trap.
 * static uchar_t lcall[7] = { 0x9a, 0, 0, 0, 0, 0x7, 0 };
 * static uchar_t lcallalt[7] = { 0x9a, 0, 0, 0, 0, 0x27, 0 };
 */
#define	LCALLSIZE	7

static int
instr_is_lcall_syscall(caddr_t pc)
{
	uchar_t instr[LCALLSIZE];

	if (copyin_nowatch(pc, (caddr_t)instr, LCALLSIZE) == 0 &&
	    instr[0] == 0x9a &&
	    instr[1] == 0 && instr[2] == 0 && instr[3] == 0 && instr[4] == 0 &&
	    (instr[5] == 0x7 || instr[5] == 0x27) &&
	    instr[6] == 0)
		return (1);

	return (0);
}
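/*
 * Decoded, those seven bytes are "lcall $0x7,$0x0" (or, for the alternate
 * form, "lcall $0x27,$0x0"): a far call through call-gate selector 0x7 or
 * 0x27 with a zero offset, which is how the obsolete lcall system call
 * mechanism enters the kernel.
 */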
/*
 * In the first revisions of amd64 CPUs produced by AMD, the LAHF and
 * SAHF instructions were not implemented in 64-bit mode. Later revisions
 * did implement these instructions. An extension to the cpuid instruction
 * was added to check for the capability of executing these instructions
 * in 64-bit mode.
 *
 * Intel originally did not implement these instructions in EM64T either,
 * but added them in later revisions.
 *
 * So, there are different chip revisions by both vendors out there that
 * may or may not implement these instructions. The easy solution is to
 * just always emulate these instructions on demand.
 *
 * SAHF == store %ah in the lower 8 bits of %rflags (opcode 0x9e)
 * LAHF == load the lower 8 bits of %rflags into %ah (opcode 0x9f)
 */
#define	LSAHFSIZE	1

static int
instr_is_lsahf(caddr_t pc, uchar_t *instr)
{
	if (copyin_nowatch(pc, (caddr_t)instr, LSAHFSIZE) == 0 &&
	    (*instr == 0x9e || *instr == 0x9f))
		return (1);
	return (0);
}
/*
 * Emulate the LAHF and SAHF instructions. The reference manuals define
 * these instructions to always load/store bit 1 as a 1, and bits 3 and 5
 * as a 0. The other, defined, bits are copied (the PS_ICC bits and PS_P).
 *
 * Note that %ah is bits 8-15 of %rax.
 */
static void
emulate_lsahf(struct regs *rp, uchar_t instr)
{
	if (instr == 0x9e) {
		/* sahf. Copy bits from %ah to flags. */
		rp->r_ps = (rp->r_ps & ~0xff) |
		    ((rp->r_rax >> 8) & PSL_LSAHFMASK) | PS_MB1;
	} else {
		/* lahf. Copy bits from flags to %ah. */
		rp->r_rax = (rp->r_rax & ~0xff00) |
		    (((rp->r_ps & PSL_LSAHFMASK) | PS_MB1) << 8);
	}
	rp->r_pc += LSAHFSIZE;
}
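/*
 * Worked example: with CF and ZF set in %rflags (bits 0 and 6, plus the
 * always-one bit 1), lahf must leave %ah == 0x43; conversely sahf with
 * %ah == 0x43 sets CF and ZF, forces bit 1 on, and clears bits 3 and 5.
 */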
#ifdef OPTERON_ERRATUM_91

/*
 * Test to see if the instruction at pc is a prefetch instruction.
 *
 * The first byte of prefetch instructions is always 0x0F.
 * The second byte is 0x18 for regular prefetch or 0x0D for AMD 3dnow prefetch.
 * The third byte (ModRM) contains the register field bits (bits 3-5).
 * These bits must be between 0 and 3 inclusive for regular prefetch and
 * 0 and 1 inclusive for AMD 3dnow prefetch.
 *
 * In 64-bit mode, there may be a one-byte REX prefix (0x40-0x4F).
 */
static int
cmp_to_prefetch(uchar_t *p)
{
	if ((p[0] & 0xF0) == 0x40)	/* 64-bit REX prefix */
		p++;

	return ((p[0] == 0x0F && p[1] == 0x18 && ((p[2] >> 3) & 7) <= 3) ||
	    (p[0] == 0x0F && p[1] == 0x0D && ((p[2] >> 3) & 7) <= 1));
}

static int
instr_is_prefetch(caddr_t pc)
{
	uchar_t instr[4];	/* optional REX prefix plus 3-byte opcode */

	return (copyin_nowatch(pc, instr, sizeof (instr)) == 0 &&
	    cmp_to_prefetch(instr));
}

#endif /* OPTERON_ERRATUM_91 */
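/*
 * Example byte sequences that match: 0x0F 0x18 0x08 (prefetcht0, ModRM reg
 * field 1) and 0x0F 0x0D 0x00 (3dnow prefetch, reg field 0); with a REX
 * prefix, 0x48 0x0F 0x18 0x08 matches once the prefix byte is skipped.
 */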
/*
 * Called from the trap handler when a processor trap occurs.
 *
 * Note: All user-level traps that might call stop() must exit
 * trap() by 'goto out' or by falling through.
 * Note Also: trap() is usually called with interrupts enabled, (PS_IE == 1)
 * however, there are paths that arrive here with PS_IE == 0 so special care
 * must be taken in those cases.
 */
void
trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	kthread_t *ct = curthread;
	proc_t *p = ttoproc(ct);
	klwp_t *lwp = ttolwp(ct);
	faultcode_t pagefault(), res, errcode;
	enum fault_type fault_type;
	int singlestep_twiddle;

	ASSERT_STACK_ALIGNED();
	CPU_STATS_ADDQ(CPU, sys, trap, 1);
	ASSERT(ct->t_schedflag & TS_DONT_SWAP);

	if (type == T_PGFLT) {
		errcode = rp->r_err;
		if (errcode & PF_ERR_WRITE)
			rw = S_WRITE;
		else if ((caddr_t)rp->r_pc == addr ||
		    (mmu.pt_nx != 0 && (errcode & PF_ERR_EXEC)))
			rw = S_EXEC;
		else
			rw = S_READ;

		/*
		 * Pentium Pro work-around
		 */
		if ((errcode & PF_ERR_PROT) && pentiumpro_bug4046376) {
			uint_t	attr;
			uint_t	priv_violation;
			uint_t	access_violation;

			if (hat_getattr(addr < (caddr_t)kernelbase ?
			    curproc->p_as->a_hat : kas.a_hat, addr, &attr)
			    == -1) {
				errcode &= ~PF_ERR_PROT;
			} else {
				priv_violation = (errcode & PF_ERR_USER) &&
				    !(attr & PROT_USER);
				access_violation = (errcode & PF_ERR_WRITE) &&
				    !(attr & PROT_WRITE);
				if (!priv_violation && !access_violation)
					goto cleanup;
			}
		}
	} else if (type == T_SGLSTP && lwp != NULL)
		lwp->lwp_pcb.pcb_drstat = (uintptr_t)addr;

	showregs(type, rp, addr);

	if (USERMODE(rp->r_cs)) {
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (ct->t_cred != p->p_cred) {
			cred_t *oldcred = ct->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			ct->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwptoregs(lwp) == rp);
		lwp->lwp_state = LWP_SYS;

		if ((caddr_t)rp->r_pc == addr)
			mstate = LMS_TFAULT;
		else
			mstate = LMS_DFAULT;

		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, mstate);
		mstate = new_mstate(ct, mstate);

		bzero(&siginfo, sizeof (siginfo));
	}
	switch (type) {
	case T_SGLSTP + USER:
	case T_BPTFLT + USER:
		break;
	default:
		FTRACE_2("trap(): type=0x%lx, regs=0x%lx",
		    (ulong_t)type, (ulong_t)rp);
		break;
	}

	switch (type) {
	default:
		/* Make sure we enable interrupts before die()ing */
		sti();	/* The SIMD exception comes in via cmninttrap */
		if (type & USER) {
			showregs(type, rp, (caddr_t)0);
			printf("trap: Unknown trap type %d in user mode\n",
			    type & ~USER);
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLTRP;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			siginfo.si_trapno = type & ~USER;
			break;
		}
		(void) die(type, rp, addr, cpuid);
		/*NOTREACHED*/
	case T_PGFLT:		/* system page fault */
		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and bounce back to the on_trap() call site
		 * via the installed trampoline.
		 */
		if ((ct->t_ontrap != NULL) &&
		    (ct->t_ontrap->ot_prot & OT_DATA_ACCESS)) {
			ct->t_ontrap->ot_trap |= OT_DATA_ACCESS;
			rp->r_pc = ct->t_ontrap->ot_trampoline;
			goto cleanup;
		}

		/*
		 * If we have an Instruction fault in kernel mode, then that
		 * means we've tried to execute a user page (SMEP) or both of
		 * PAE and NXE are enabled. In either case, given that it's a
		 * kernel fault, we should panic immediately and not try to
		 * make any more forward progress. This indicates a bug in
		 * the kernel, which if execution continued, could be
		 * exploited to wreak havoc on the system.
		 */
		if (errcode & PF_ERR_EXEC) {
			(void) die(type, rp, addr, cpuid);
		}
		/*
		 * We need to check if SMAP is in play. If SMAP is in play,
		 * then any access to a user page will show up as a
		 * protection violation. To see if SMAP is enabled we first
		 * check if it's a user address and whether we have the
		 * feature flag set. If we do and the interrupted registers
		 * do not allow for user accesses (PS_ACHK is not enabled),
		 * then we need to die immediately.
		 */
		if (addr < (caddr_t)kernelbase &&
		    is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE &&
		    (rp->r_ps & PS_ACHK) == 0) {
			(void) die(type, rp, addr, cpuid);
		}
		/*
		 * See if we can handle as pagefault. Save lofault and onfault
		 * across this. Here we assume that an address less than
		 * KERNELBASE is a user fault.  We can do this as copy.s
		 * routines verify that the starting address is less than
		 * KERNELBASE before starting and because we know that we
		 * always have KERNELBASE mapped as invalid.
		 */
		lofault = ct->t_lofault;
		onfault = ct->t_onfault;
		ct->t_lofault = 0;

		mstate = new_mstate(ct, LMS_KFAULT);

		if (addr < (caddr_t)kernelbase) {
			res = pagefault(addr,
			    (errcode & PF_ERR_PROT)? F_PROT : F_INVAL, rw, 0);
			if (res == FC_NOMAP &&
			    addr < p->p_usrstack &&
			    grow(addr))
				res = 0;
		} else {
			res = pagefault(addr,
			    (errcode & PF_ERR_PROT)? F_PROT : F_INVAL, rw, 1);
		}
		(void) new_mstate(ct, mstate);

		/*
		 * Restore lofault and onfault. If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		ct->t_lofault = lofault;
		ct->t_onfault = onfault;
#if defined(OPTERON_ERRATUM_93) && defined(_LP64)
		if (lofault == 0 && opteron_erratum_93) {
			/*
			 * Workaround for Opteron Erratum 93. On return from
			 * a System Management Interrupt at a HLT instruction
			 * the %rip might be truncated to a 32 bit value.
			 * BIOS is supposed to fix this, but some don't.
			 * If this occurs we simply restore the high order
			 * bits.  The HLT instruction is 1 byte of 0xf4.
			 */
			uintptr_t rip = rp->r_pc;

			if ((rip & 0xfffffffful) == rip) {
				rip |= 0xfffffffful << 32;
				if (hat_getpfnum(kas.a_hat, (caddr_t)rip) !=
				    PFN_INVALID &&
				    (*(uchar_t *)rip == 0xf4 ||
				    *(uchar_t *)(rip - 1) == 0xf4)) {
					rp->r_pc = rip;
					goto cleanup;
				}
			}
		}
#endif /* OPTERON_ERRATUM_93 && _LP64 */
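/*
 * Illustrative: a kernel HLT at 0xfffffffffb812345 whose %rip was truncated
 * by the SMI to 0x00000000fb812345 fits in 32 bits, so the workaround ORs
 * the upper 32 bits back in and resumes there, provided the restored
 * address is mapped and the byte at (or just before) it is 0xf4.
 */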
#ifdef OPTERON_ERRATUM_91
		if (lofault == 0 && opteron_erratum_91) {
			/*
			 * Workaround for Opteron Erratum 91. Prefetches may
			 * generate a page fault (they're not supposed to do
			 * that!). If this occurs we simply return back to the
			 * instruction.
			 */
			caddr_t pc = (caddr_t)rp->r_pc;

			/*
			 * If the faulting PC is not mapped, this is a
			 * legitimate kernel page fault that must result in a
			 * panic. If the faulting PC is mapped, it could
			 * contain a prefetch instruction. Check for that here.
			 */
			if (hat_getpfnum(kas.a_hat, pc) != PFN_INVALID) {
				if (cmp_to_prefetch((uchar_t *)pc)) {
					cmn_err(CE_WARN, "Opteron erratum 91 "
					    "occurred: kernel prefetch"
					    " at %p generated a page fault!",
					    (void *)rp->r_pc);
					goto cleanup;
				}
			}
			(void) die(type, rp, addr, cpuid);
		}
#endif /* OPTERON_ERRATUM_91 */

		if (lofault == 0)
			(void) die(type, rp, addr, cpuid);

		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		showregs(type, rp, addr);
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		rp->r_pc = ct->t_lofault;
		goto cleanup;
	case T_PGFLT + USER:	/* user page fault */
		if (tudebug)
			printf("user %s fault:  addr=0x%lx errcode=0x%x\n",
			    fault_str, (uintptr_t)addr, errcode);

#if defined(OPTERON_ERRATUM_100) && defined(_LP64)
		/*
		 * Workaround for AMD erratum 100
		 *
		 * A 32-bit process may receive a page fault on a non
		 * 32-bit address by mistake. The range of the faulting
		 * address will be
		 *
		 *	0xffffffff80000000 .. 0xffffffffffffffff or
		 *	0x0000000100000000 .. 0x000000017fffffff
		 *
		 * The fault is always due to an instruction fetch, however
		 * the value of r_pc should be correct (in 32 bit range),
		 * so we ignore the page fault on the bogus address.
		 */
		if (p->p_model == DATAMODEL_ILP32 &&
		    (0xffffffff80000000 <= (uintptr_t)addr ||
		    (0x100000000 <= (uintptr_t)addr &&
		    (uintptr_t)addr <= 0x17fffffff))) {
			if (!opteron_erratum_100)
				panic("unexpected erratum #100");
			if (rp->r_pc <= 0xffffffff)
				goto out;
		}
#endif /* OPTERON_ERRATUM_100 && _LP64 */
		ASSERT(!(curthread->t_flag & T_WATCHPT));
		watchpage = (pr_watch_active(p) && pr_is_watchpage(addr, rw));

		/*
		 * In 32-bit mode, the lcall (system call) instruction fetches
		 * one word from the stack, at the stack pointer, because of
		 * the way the call gate is constructed. This is a bogus
		 * read and should not be counted as a read watchpoint.
		 * We work around the problem here by testing to see if
		 * this situation applies and, if so, simply jumping to
		 * the code in locore.s that fields the system call trap.
		 * The registers on the stack are already set up properly
		 * due to the match between the call gate sequence and the
		 * trap gate sequence.  We just have to adjust the pc.
		 */
		if (watchpage && addr == (caddr_t)rp->r_sp &&
		    rw == S_READ && instr_is_lcall_syscall((caddr_t)rp->r_pc)) {
			extern void watch_syscall(void);

			rp->r_pc += LCALLSIZE;
			watch_syscall();	/* never returns */
		}
		if (!watchpage || (sz = instr_size(rp, &vaddr, rw)) <= 0)
			fault_type = (errcode & PF_ERR_PROT)? F_PROT : F_INVAL;
		else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
		    sz, NULL, rw)) != 0) {
			if (ta) {
				do_watch_step(vaddr, sz, rw,
				    watchcode, rp->r_pc);
				fault_type = F_INVAL;
			} else {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = watchcode;
				siginfo.si_addr = vaddr;
				siginfo.si_trapafter = 0;
				siginfo.si_pc = (caddr_t)rp->r_pc;
				break;
			}
		} else {
			/* XXX pr_watch_emul() never succeeds (for now) */
			if (rw != S_EXEC && pr_watch_emul(rp, vaddr, rw))
				goto out;
			do_watch_step(vaddr, sz, rw, 0, 0);
			fault_type = F_INVAL;
		}
		res = pagefault(addr, fault_type, rw, 0);

		/*
		 * If pagefault() succeeded, ok.
		 * Otherwise attempt to grow the stack.
		 */
		if (res == 0 ||
		    (res == FC_NOMAP &&
		    addr < p->p_usrstack &&
		    grow(addr))) {
			lwp->lwp_lastfault = FLTPAGE;
			lwp->lwp_lastfaddr = addr;
			if (prismember(&p->p_fltmask, FLTPAGE)) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_addr = addr;
				(void) stop_on_fault(FLTPAGE, &siginfo);
			}
			goto out;
		} else if (res == FC_PROT && addr < p->p_usrstack &&
		    (mmu.pt_nx != 0 && (errcode & PF_ERR_EXEC))) {
			report_stack_exec(p, addr);
		}

#ifdef OPTERON_ERRATUM_91
		/*
		 * Workaround for Opteron Erratum 91. Prefetches may generate a
		 * page fault (they're not supposed to do that!). If this
		 * occurs we simply return back to the instruction.
		 *
		 * We rely on copyin to properly fault in the page with r_pc.
		 */
		if (opteron_erratum_91 &&
		    addr != (caddr_t)rp->r_pc &&
		    instr_is_prefetch((caddr_t)rp->r_pc)) {
			cmn_err(CE_WARN, "Opteron erratum 91 occurred: "
			    "prefetch at %p in pid %d generated a trap!",
			    (void *)rp->r_pc, p->p_pid);
			goto out;
		}
#endif /* OPTERON_ERRATUM_91 */
		if (tudebug)
			showregs(type, rp, addr);
		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 * We map all errors returned from pagefault() to SIGSEGV.
		 */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (FC_CODE(res)) {
		case FC_HWERR:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRERR;
			break;
		case FC_ALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			break;
		case FC_OBJERR:
			if ((siginfo.si_errno = FC_ERRNO(res)) != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
			}
			break;
		default:	/* FC_NOMAP or FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code =
			    (res == FC_NOMAP)? SEGV_MAPERR : SEGV_ACCERR;
			break;
		}
		break;
	case T_ILLINST + USER:	/* invalid opcode fault */
		/*
		 * If the syscall instruction is disabled due to LDT usage, a
		 * user program that attempts to execute it will trigger a #ud
		 * trap. Check for that case here. If this occurs on a CPU
		 * which doesn't even support syscall, the result of all of
		 * this will be to emulate that particular instruction.
		 */
		if (p->p_ldt != NULL &&
		    ldt_rewrite_syscall(rp, p, X86FSET_ASYSC))
			goto out;

		/*
		 * Emulate the LAHF and SAHF instructions if needed.
		 * See the instr_is_lsahf function for details.
		 */
		if (p->p_model == DATAMODEL_LP64 &&
		    instr_is_lsahf((caddr_t)rp->r_pc, &instr)) {
			emulate_lsahf(rp, instr);
			goto out;
		}

		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_ILLOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;

	case T_ZERODIV + USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;

	case T_OVFLW + USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;

	case T_NOEXTFLT + USER:	/* math coprocessor not available */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if (fpnoextflt(rp)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
		}
		break;

	case T_EXTOVRFLT:	/* extension overrun fault */
		/* check if we took a kernel trap on behalf of user */
		{
			extern void ndptrap_frstor(void);
			if (rp->r_pc != (uintptr_t)ndptrap_frstor) {
				sti(); /* T_EXTOVRFLT comes in via cmninttrap */
				(void) die(type, rp, addr, cpuid);
			}
		}
		/*FALLTHROUGH*/
	case T_EXTOVRFLT + USER:	/* extension overrun fault */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if (fpextovrflt(rp)) {
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = (caddr_t)rp->r_pc;
		}
		break;

	case T_EXTERRFLT:	/* x87 floating point exception pending */
		/* check if we took a kernel trap on behalf of user */
		{
			extern void ndptrap_frstor(void);
			if (rp->r_pc != (uintptr_t)ndptrap_frstor) {
				sti(); /* T_EXTERRFLT comes in via cmninttrap */
				(void) die(type, rp, addr, cpuid);
			}
		}
		/*FALLTHROUGH*/
	case T_EXTERRFLT + USER: /* x87 floating point exception pending */
		if (tudebug && tudebugfpe)
			showregs(type, rp, addr);
		if ((sicode = fpexterrflt(rp)) != 0) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = sicode;
			siginfo.si_addr = (caddr_t)rp->r_pc;
		}
		break;

	case T_SIMDFPE + USER:		/* SSE and SSE2 exceptions */
		if (tudebug && tudebugsse)
			showregs(type, rp, addr);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE) &&
		    !is_x86_feature(x86_featureset, X86FSET_SSE2)) {
			/*
			 * There are rumours that some user instructions
			 * on older CPUs can cause this trap to occur; in
			 * which case send a SIGILL instead of a SIGFPE.
			 */
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLTRP;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			siginfo.si_trapno = type & ~USER;
		} else if ((sicode = fpsimderrflt(rp)) != 0) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = sicode;
			siginfo.si_addr = (caddr_t)rp->r_pc;
		}

		sti();	/* The SIMD exception comes in via cmninttrap */
		break;
	case T_BPTFLT:	/* breakpoint trap */
		/*
		 * Kernel breakpoint traps should only happen when kmdb is
		 * active, and even then, it'll have interposed on the IDT, so
		 * control won't get here.  If it does, we've hit a breakpoint
		 * without the debugger, which is very strange, and very
		 * fatal.
		 */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);

		(void) die(type, rp, addr, cpuid);
		break;

	case T_SGLSTP: /* single step/hw breakpoint exception */

		/* Now evaluate how we got here */
		if (lwp != NULL && (lwp->lwp_pcb.pcb_drstat & DR_SINGLESTEP)) {
			/*
			 * i386 single-steps even through lcalls which
			 * change the privilege level. So we take a trap at
			 * the first instruction in privileged mode.
			 *
			 * Set a flag to indicate that upon completion of
			 * the system call, deal with the single-step trap.
			 *
			 * The same thing happens for sysenter, too.
			 */
			singlestep_twiddle = 0;
			if (rp->r_pc == (uintptr_t)sys_sysenter ||
			    rp->r_pc == (uintptr_t)brand_sys_sysenter) {
				singlestep_twiddle = 1;
#if defined(__amd64)
				/*
				 * Since we are already on the kernel's
				 * %gs, on 64-bit systems the sysenter case
				 * needs to adjust the pc to avoid
				 * executing the swapgs instruction at the
				 * top of the handler.
				 */
				if (rp->r_pc == (uintptr_t)sys_sysenter)
					rp->r_pc = (uintptr_t)
					    _sys_sysenter_post_swapgs;
				else
					rp->r_pc = (uintptr_t)
					    _brand_sys_sysenter_post_swapgs;
#endif
			} else if (rp->r_pc == (uintptr_t)sys_call ||
			    rp->r_pc == (uintptr_t)brand_sys_call) {
				singlestep_twiddle = 1;
			} else {
				/* not on sysenter/syscall; uregs available */
				if (tudebug && tudebugbpt)
					showregs(type, rp, (caddr_t)0);
			}
			if (singlestep_twiddle) {
				rp->r_ps &= ~PS_T; /* turn off trace */
				lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
				goto cleanup;
			}
		}
		/* XXX - needs review on debugger interface? */
		if (boothowto & RB_DEBUG)
			debug_enter((char *)NULL);
		(void) die(type, rp, addr, cpuid);
		break;
	case T_NMIFLT:	/* NMI interrupt */
		printf("Unexpected NMI in system mode\n");
		goto cleanup;

	case T_NMIFLT + USER:	/* NMI interrupt */
		printf("Unexpected NMI in user mode\n");
		break;

	case T_GPFLT:	/* general protection violation */
		/*
		 * Any #GP that occurs during an on_trap .. no_trap bracket
		 * with OT_DATA_ACCESS or OT_SEGMENT_ACCESS protection,
		 * or in a on_fault .. no_fault bracket, is forgiven
		 * and we trampoline.  This protection is given regardless
		 * of whether we are 32/64 bit etc - if a distinction is
		 * required then define new on_trap protection types.
		 *
		 * On amd64, we can get a #gp from referencing addresses
		 * in the virtual address hole e.g. from a copyin or in
		 * update_sregs while updating user segment registers.
		 *
		 * On the 32-bit hypervisor we could also generate one in
		 * mfn_to_pfn by reaching around or into where the hypervisor
		 * lives which is protected by segmentation.
		 */

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and trampoline back to the on_trap() call site
		 * for OT_DATA_ACCESS or OT_SEGMENT_ACCESS.
		 */
		if (ct->t_ontrap != NULL) {
			int ttype = ct->t_ontrap->ot_prot &
			    (OT_DATA_ACCESS | OT_SEGMENT_ACCESS);

			if (ttype != 0) {
				ct->t_ontrap->ot_trap |= ttype;
				if (tudebug)
					showregs(type, rp, (caddr_t)0);
				rp->r_pc = ct->t_ontrap->ot_trampoline;
				goto cleanup;
			}
		}

		/*
		 * If we're under lofault protection (copyin etc.),
		 * longjmp back to lofault with an EFAULT.
		 */
		if (ct->t_lofault) {
			/*
			 * Fault is not resolvable, so just return to lofault
			 */
			if (tudebug)
				showregs(type, rp, addr);
			rp->r_pc = ct->t_lofault;
			goto cleanup;
		}

		/*
		 * We fall through to the next case, which repeats
		 * the OT_SEGMENT_ACCESS check which we've already
		 * done, so we'll always fall through to the
		 * T_STKFLT case.
		 */
		/*FALLTHROUGH*/
	case T_SEGFLT:	/* segment not present fault */
		/*
		 * One example of this is #NP in update_sregs while
		 * attempting to update a user segment register
		 * that points to a descriptor that is marked not
		 * present.
		 */
		if (ct->t_ontrap != NULL &&
		    ct->t_ontrap->ot_prot & OT_SEGMENT_ACCESS) {
			ct->t_ontrap->ot_trap |= OT_SEGMENT_ACCESS;
			if (tudebug)
				showregs(type, rp, (caddr_t)0);
			rp->r_pc = ct->t_ontrap->ot_trampoline;
			goto cleanup;
		}
		/*FALLTHROUGH*/
	case T_STKFLT:	/* stack fault */
	case T_TSSFLT:	/* invalid TSS fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		if (kern_gpfault(rp))
			(void) die(type, rp, addr, cpuid);
		goto cleanup;

	/*
	 * ONLY 32-bit PROCESSES can USE a PRIVATE LDT! 64-bit apps
	 * should have no need for them, so we put a stop to it here.
	 *
	 * So: not-present fault is ONLY valid for 32-bit processes with
	 * a private LDT trying to do a system call. Emulate it.
	 *
	 * #gp fault is ONLY valid for 32-bit processes also, which DO NOT
	 * have a private LDT, and are trying to do a system call. Emulate it.
	 */
	case T_SEGFLT + USER:	/* segment not present fault */
	case T_GPFLT + USER:	/* general protection violation */
#ifdef _SYSCALL32_IMPL
		if (p->p_model != DATAMODEL_NATIVE) {
#endif /* _SYSCALL32_IMPL */
		if (instr_is_lcall_syscall((caddr_t)rp->r_pc)) {
			if (type == T_SEGFLT + USER)
				ASSERT(p->p_ldt != NULL);

			if ((p->p_ldt == NULL && type == T_GPFLT + USER) ||
			    type == T_SEGFLT + USER) {
				/*
				 * The user attempted a system call via the
				 * obsolete call gate mechanism. Because the
				 * process doesn't have an LDT (i.e. the ldtr
				 * contains 0), a #gp results.  Emulate the
				 * syscall here, just as we do above for a
				 * #np trap.
				 */

				/*
				 * Since this is a not-present trap, rp->r_pc
				 * points to the trapping lcall instruction.
				 * We need to bump it to the next insn so the
				 * app can continue on.
				 */
				rp->r_pc += LCALLSIZE;
				lwp->lwp_regs = rp;

				/*
				 * Normally the microstate of the LWP is
				 * forced back to LMS_USER by the syscall
				 * handlers. Emulate that behavior here.
				 */
				mstate = LMS_USER;

				dosyscall();
				goto out;
			}
		}
#ifdef _SYSCALL32_IMPL
		}
#endif /* _SYSCALL32_IMPL */
		/*
		 * If the current process is using a private LDT and the
		 * trapping instruction is sysenter, the sysenter instruction
		 * has been disabled on the CPU because it destroys segment
		 * registers. If this is the case, rewrite the instruction to
		 * be a safe system call and retry it. If this occurs on a CPU
		 * which doesn't even support sysenter, the result of all of
		 * this will be to emulate that particular instruction.
		 */
		if (p->p_ldt != NULL &&
		    ldt_rewrite_syscall(rp, p, X86FSET_SEP))
			goto out;
	case T_BOUNDFLT + USER:	/* bound fault */
	case T_STKFLT + USER:	/* stack fault */
	case T_TSSFLT + USER:	/* invalid TSS fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;

	case T_ALIGNMENT + USER:	/* user alignment error (486) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGBUS;
		siginfo.si_code = BUS_ADRALN;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;

	case T_SGLSTP + USER: /* single step/hw breakpoint exception */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);

		/* Was it single-stepping? */
		if (lwp->lwp_pcb.pcb_drstat & DR_SINGLESTEP) {
			pcb_t *pcb = &lwp->lwp_pcb;

			/*
			 * If both NORMAL_STEP and WATCH_STEP are in effect,
			 * give precedence to WATCH_STEP.  If neither is set,
			 * user must have set the PS_T bit in %efl; treat this
			 * as NORMAL_STEP.
			 */
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    ((pcb->pcb_flags & NORMAL_STEP) ||
			    !(pcb->pcb_flags & WATCH_STEP))) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_BPTFLT + USER:	/* breakpoint trap */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0);
		/*
		 * int 3 (the breakpoint instruction) leaves the pc referring
		 * to the address one byte after the breakpointed address.
		 * If the P_PR_BPTADJ flag has been set via /proc, we adjust
		 * it back so it refers to the breakpointed address.
		 */
		if (p->p_proc_flag & P_PR_BPTADJ)
			rp->r_pc--;
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		break;
	case T_AST:
		/*
		 * This occurs only after the cs register has been made to
		 * look like a kernel selector, either through debugging or
		 * possibly by functions like setcontext().  The thread is
		 * about to cause a general protection fault at common_iret()
		 * in locore.  We let that happen immediately instead of
		 * doing the T_AST processing.
		 */
		goto cleanup;

	case T_AST + USER:	/* profiling, resched, h/w error pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			proc_t *p = ttoproc(curthread);
			extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);

			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			print_msg_hwerr(p->p_ct_process->conp_contract.ct_id,
			    p);
			contract_process_hwerr(p->p_ct_process, p);
			siginfo.si_signo = SIGKILL;
			siginfo.si_code = SI_NOINFO;
		} else if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
			}
		}
		break;
	}

	/*
	 * We can't get here from a system trap
	 */
	ASSERT(type & USER);

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = siginfo.si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, &siginfo);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (siginfo.si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, &siginfo) == 0)
			siginfo.si_signo = 0;
	}

	if (siginfo.si_signo)
		trapsig(&siginfo, (fault != FLTFPE && fault != FLTCPCOVF));

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);
	if (ct->t_astflag | ct->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions
		 * that may have caused an AST.  This flag is on whenever a
		 * signal or unusual condition should be handled after the
		 * next trap or syscall.
		 */
		astoff(ct);
		/*
		 * If a single-step trap occurred on a syscall (see above)
		 * recognize it now.  Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP
		 * to the LWP or may otherwise mark the LWP to call
		 * issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		ct->t_sig_check = 0;

		/*
		 * As in other code paths that check against TP_CHANGEBIND,
		 * we perform the check first without p_lock held -- only
		 * acquiring p_lock in the unlikely event that it is indeed
		 * set.  This is safe because we are doing this after the
		 * astoff(); if we are racing another thread setting
		 * TP_CHANGEBIND on us, we will pick it up on a subsequent
		 * trip through trap().
		 */
		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq, they're AIO_POLL bit is set, the kernel
		 * should copyout their result_t to user memory. by copying
		 * out the result_t, the user can poll on memory waiting
		 * for the kaio request to complete.
		 */

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(ct, lwp, p)) {
			if (issig(FORREAL))
				psig();
			ct->t_sig_check = 1;
		}

		if (ct->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			ct->t_sig_check = 1;
		}

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}
: /* We can't get here from a system trap */
1543 ASSERT(type
& USER
);
1549 * Set state to LWP_USER here so preempt won't give us a kernel
1550 * priority if it occurs after this point. Call CL_TRAPRET() to
1551 * restore the user-level priority.
1553 * It is important that no locks (other than spinlocks) be entered
1554 * after this point before returning to user mode (unless lwp_state
1555 * is set back to LWP_SYS).
1557 lwp
->lwp_state
= LWP_USER
;
1559 if (ct
->t_trapret
) {
1565 if (CPU
->cpu_runrun
|| curthread
->t_schedflag
& TS_ANYWAITQ
)
1568 (void) new_mstate(ct
, mstate
);
1571 TNF_PROBE_1(thread_state
, "thread", /* CSTYLED */,
1572 tnf_microstate
, state
, LMS_USER
);
1576 cleanup
: /* system traps end up here */
1577 ASSERT(!(type
& USER
));
/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {		/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling, preempt the running kernel thread.
 *	the argument is old PIL for an interrupt,
 *	or the distinguished value KPREEMPT_SYNC.
 */
void
kpreempt(int asyncspl)
{
	kthread_t *ct = curthread;

	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}

	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (ct->t_preempt) {
			/*
			 * either a privileged thread (idle, panic, interrupt)
			 * or will check when t_preempt is lowered
			 * We need to specifically handle the case where
			 * the thread is in the middle of swtch (resume has
			 * been called) and has its t_preempt set
			 * [idle thread and a thread which is in kpreempt
			 * already] and then a high priority thread is
			 * available in the local dispatch queue.
			 * In this case the resumed thread needs to take a
			 * trap so that it can call kpreempt. We achieve
			 * this by using siron().
			 * How do we detect this condition:
			 * idle thread is running and is in the midst of
			 * resume: curthread->t_pri == -1 && CPU->dispthread
			 * != CPU->thread
			 * Need to ensure that this happens only at high pil
			 * resume is called at high pil
			 * Only in resume_from_idle is the pil changed.
			 */
			if (ct->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (ct->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (ct->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (ct->t_state != TS_ONPROC ||
		    ct->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}
		if (getpil() >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}
		if (!interrupts_enabled()) {
			/*
			 * Can't preempt while running with ints disabled
			 */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}
		if (asyncspl != KPREEMPT_SYNC)
			kpreempt_cnts.kpc_apreempt++;
		else
			kpreempt_cnts.kpc_spreempt++;

		ct->t_preempt++;
		preempt();
		ct->t_preempt--;
	} while (CPU->cpu_kprunrun);
}
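/*
 * The do/while re-evaluates cpu_kprunrun after each preempt(): another
 * preemption request can be posted while this thread is off-CPU, in which
 * case the loop preempts again rather than returning with the flag still
 * set.
 */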
/*
 * Print out debugging info.
 */
static void
showregs(uint_t type, struct regs *rp, caddr_t addr)
{
	if (PTOU(curproc)->u_comm[0])
		printf("%s: ", PTOU(curproc)->u_comm);
	if (type < TRAP_TYPES)
		printf("#%s %s\n", trap_type_mnemonic[type], trap_type[type]);
	else if (type == T_SYSCALL)
		printf("Syscall Trap:\n");
	else
		printf("Bad Trap = %d\n", type);

	if (type == T_PGFLT) {
		printf("Bad %s fault at addr=0x%lx\n",
		    USERMODE(rp->r_cs) ? "user" : "kernel", (uintptr_t)addr);
	} else if (addr) {
		printf("addr=0x%lx\n", (uintptr_t)addr);
	}

	printf("pid=%d, pc=0x%lx, sp=0x%lx, eflags=0x%lx\n",
	    (ttoproc(curthread) && ttoproc(curthread)->p_pidp) ?
	    ttoproc(curthread)->p_pid : 0, rp->r_pc, rp->r_sp, rp->r_ps);

#if defined(__lint)
	/*
	 * this clause can be deleted when lint bug 4870403 is fixed
	 * (lint thinks that bit 32 is illegal in a %b format string)
	 */
	printf("cr0: %x cr4: %b\n",
	    (uint_t)getcr0(), (uint_t)getcr4(), FMT_CR4);
#else
	printf("cr0: %b cr4: %b\n",
	    (uint_t)getcr0(), FMT_CR0, (uint_t)getcr4(), FMT_CR4);
#endif	/* __lint */

	printf("cr2: %lx", getcr2());
	printf("cr3: %lx", getcr3());
#if defined(__amd64)
	printf("cr8: %lx\n", getcr8());
#endif

	dumpregs(rp);
}
static void
dumpregs(struct regs *rp)
{
#if defined(__amd64)
	const char fmt[] = "\t%3s: %16lx %3s: %16lx %3s: %16lx\n";

	printf(fmt, "rdi", rp->r_rdi, "rsi", rp->r_rsi, "rdx", rp->r_rdx);
	printf(fmt, "rcx", rp->r_rcx, " r8", rp->r_r8, " r9", rp->r_r9);
	printf(fmt, "rax", rp->r_rax, "rbx", rp->r_rbx, "rbp", rp->r_rbp);
	printf(fmt, "r10", rp->r_r10, "r11", rp->r_r11, "r12", rp->r_r12);
	printf(fmt, "r13", rp->r_r13, "r14", rp->r_r14, "r15", rp->r_r15);

	printf(fmt, "fsb", rdmsr(MSR_AMD_FSBASE), "gsb", rdmsr(MSR_AMD_GSBASE),
	    " ds", rp->r_ds);
	printf(fmt, " es", rp->r_es, " fs", rp->r_fs, " gs", rp->r_gs);

	printf(fmt, "trp", rp->r_trapno, "err", rp->r_err, "rip", rp->r_rip);
	printf(fmt, " cs", rp->r_cs, "rfl", rp->r_rfl, "rsp", rp->r_rsp);

	printf("\t%3s: %16lx\n", " ss", rp->r_ss);

#elif defined(__i386)
	const char fmt[] = "\t%3s: %8lx %3s: %8lx %3s: %8lx %3s: %8lx\n";

	printf(fmt, " gs", rp->r_gs, " fs", rp->r_fs,
	    " es", rp->r_es, " ds", rp->r_ds);
	printf(fmt, "edi", rp->r_edi, "esi", rp->r_esi,
	    "ebp", rp->r_ebp, "esp", rp->r_esp);
	printf(fmt, "ebx", rp->r_ebx, "edx", rp->r_edx,
	    "ecx", rp->r_ecx, "eax", rp->r_eax);
	printf(fmt, "trp", rp->r_trapno, "err", rp->r_err,
	    "eip", rp->r_eip, " cs", rp->r_cs);
	printf("\t%3s: %8lx %3s: %8lx %3s: %8lx\n",
	    "efl", rp->r_efl, "usp", rp->r_uesp, " ss", rp->r_ss);
#endif	/* __i386 */
}
/*
 * Test to see if the instruction is iret on i386 or iretq on amd64.
 *
 * On the hypervisor we can only test for nopop_sys_rtt_syscall. If true
 * then we are in the context of hypervisor's failsafe handler because it
 * tried to iret and failed due to a bad selector. See xen_failsafe_callback.
 */
static int
instr_is_iret(caddr_t pc)
{
#if defined(__xpv)
	extern void nopop_sys_rtt_syscall(void);
	return ((pc == (caddr_t)nopop_sys_rtt_syscall) ? 1 : 0);

#else

#if defined(__amd64)
	static const uint8_t iret_insn[2] = { 0x48, 0xcf };	/* iretq */

#elif defined(__i386)
	static const uint8_t iret_insn[1] = { 0xcf };		/* iret */
#endif	/* __i386 */
	return (bcmp(pc, iret_insn, sizeof (iret_insn)) == 0);

#endif	/* __xpv */
}

/*
 * Test to see if the instruction is part of __SEGREGS_POP
 *
 * Note carefully the appallingly awful dependency between
 * the instruction sequence used in __SEGREGS_POP and these
 * instructions encoded here.
 */
static int
instr_is_segregs_pop(caddr_t pc)
{
	static const uint8_t movw_0_esp_gs[4] = { 0x8e, 0x6c, 0x24, 0x0 };
	static const uint8_t movw_4_esp_fs[4] = { 0x8e, 0x64, 0x24, 0x4 };
	static const uint8_t movw_8_esp_es[4] = { 0x8e, 0x44, 0x24, 0x8 };
	static const uint8_t movw_c_esp_ds[4] = { 0x8e, 0x5c, 0x24, 0xc };

	if (bcmp(pc, movw_0_esp_gs, sizeof (movw_0_esp_gs)) == 0 ||
	    bcmp(pc, movw_4_esp_fs, sizeof (movw_4_esp_fs)) == 0 ||
	    bcmp(pc, movw_8_esp_es, sizeof (movw_8_esp_es)) == 0 ||
	    bcmp(pc, movw_c_esp_ds, sizeof (movw_c_esp_ds)) == 0)
		return (1);

	return (0);
}

/*
 * Test to see if the instruction is part of _sys_rtt.
 *
 * Again on the hypervisor if we try to IRET to user land with a bad code
 * or stack selector we will get vectored through xen_failsafe_callback.
 * In which case we assume we got here via _sys_rtt since we only allow
 * IRET to user land to take place in _sys_rtt.
 */
static int
instr_is_sys_rtt(caddr_t pc)
{
	extern void _sys_rtt(), _sys_rtt_end();

	if ((uintptr_t)pc < (uintptr_t)_sys_rtt ||
	    (uintptr_t)pc > (uintptr_t)_sys_rtt_end)
		return (0);

	return (1);
}
/*
 * Handle #gp faults in kernel mode.
 *
 * One legitimate way this can happen is if we attempt to update segment
 * registers to naughty values on the way out of the kernel.
 *
 * This can happen in a couple of ways: someone - either accidentally or
 * on purpose - creates (setcontext(2), lwp_create(2)) or modifies
 * (signal(2)) a ucontext that contains silly segment register values.
 * Or someone - either accidentally or on purpose - modifies the prgregset_t
 * of a subject process via /proc to contain silly segment register values.
 *
 * (The unfortunate part is that we can end up discovering the bad segment
 * register value in the middle of an 'iret' after we've popped most of the
 * stack.  So it becomes quite difficult to associate an accurate ucontext
 * with the lwp, because the act of taking the #gp trap overwrites most of
 * what we were going to send the lwp.)
 *
 * OTOH if it turns out that's -not- the problem, and we're -not- an lwp
 * trying to return to user mode and we get a #gp fault, then we need
 * to die() -- which will happen if we return non-zero from this routine.
 */
static int
kern_gpfault(struct regs *rp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	struct regs tmpregs, *trp = NULL;
	caddr_t pc = (caddr_t)rp->r_pc;
	int v;
	uint32_t auditing = AU_AUDITING();

	/*
	 * if we're not an lwp, or in the case of running native the
	 * pc range is outside _sys_rtt, then we should immediately
	 * be die()ing horribly.
	 */
	if (lwp == NULL || !instr_is_sys_rtt(pc))
		return (1);

	/*
	 * So at least we're in the right part of the kernel.
	 *
	 * Disassemble the instruction at the faulting pc.
	 * Once we know what it is, we carefully reconstruct the stack
	 * based on the order in which the stack is deconstructed in
	 * _sys_rtt.
	 */
	if (instr_is_iret(pc)) {
		/*
		 * We took the #gp while trying to perform the IRET.
		 * This means that either %cs or %ss are bad.
		 * All we know for sure is that most of the general
		 * registers have been restored, including the
		 * segment registers, and all we have left on the
		 * topmost part of the lwp's stack are the
		 * registers that the iretq was unable to consume.
		 *
		 * All the rest of the state was crushed by the #gp
		 * which pushed -its- registers atop our old save area
		 * (because we had to decrement the stack pointer, sigh) so
		 * all that we can try and do is to reconstruct the
		 * crushed frame from the #gp trap frame itself.
		 */
		trp = &tmpregs;
		trp->r_ss = lwptoregs(lwp)->r_ss;
		trp->r_sp = lwptoregs(lwp)->r_sp;
		trp->r_ps = lwptoregs(lwp)->r_ps;
		trp->r_cs = lwptoregs(lwp)->r_cs;
		trp->r_pc = lwptoregs(lwp)->r_pc;
		bcopy(rp, trp, offsetof(struct regs, r_pc));

		/*
		 * Validate simple math
		 */
		ASSERT(trp->r_pc == lwptoregs(lwp)->r_pc);
		ASSERT(trp->r_err == rp->r_err);
	}

#if defined(__amd64)
	if (trp == NULL && lwp->lwp_pcb.pcb_rupdate != 0) {
		/*
		 * This is the common case -- we're trying to load
		 * a bad segment register value in the only section
		 * of kernel code that ever loads segment registers.
		 *
		 * We don't need to do anything at this point because
		 * the pcb contains all the pending segment register
		 * state, and the regs are still intact because we
		 * didn't adjust the stack pointer yet.  Given the fidelity
		 * of all this, we could conceivably send a signal
		 * to the lwp, rather than core-ing.
		 */
		trp = lwptoregs(lwp);
		ASSERT((caddr_t)trp == (caddr_t)rp->r_sp);
	}

#elif defined(__i386)

	if (trp == NULL && instr_is_segregs_pop(pc))
		trp = lwptoregs(lwp);

#endif	/* __i386 */

	if (trp == NULL)
		return (1);

	/*
	 * If we get to here, we're reasonably confident that we've
	 * correctly decoded what happened on the way out of the kernel.
	 * Rewrite the lwp's registers so that we can create a core dump
	 * that (at least vaguely) represents the mcontext we were
	 * being asked to restore when things went so terribly wrong.
	 */

	/*
	 * Make sure that we have a meaningful %trapno and %err.
	 */
	trp->r_trapno = rp->r_trapno;
	trp->r_err = rp->r_err;

	if ((caddr_t)trp != (caddr_t)lwptoregs(lwp))
		bcopy(trp, lwptoregs(lwp), sizeof (*trp));

	mutex_enter(&p->p_lock);
	lwp->lwp_cursig = SIGSEGV;
	mutex_exit(&p->p_lock);

	/*
	 * Terminate all LWPs but don't discard them.  If another lwp beat
	 * us to the punch by calling exit(), evaporate now.
	 */
	if (exitlwps(1) != 0) {
		mutex_enter(&p->p_lock);
		lwp_exit();
	}

	if (auditing)		/* audit core dump */
		audit_core_start(SIGSEGV);
	v = core(SIGSEGV, B_FALSE);
	if (auditing)		/* audit core dump */
		audit_core_finish(v ? CLD_KILLED : CLD_DUMPED);
	exit(v ? CLD_KILLED : CLD_DUMPED, SIGSEGV);
	return (0);
}
/*
 * dump_tss() - Display the TSS structure
 */

#if defined(__amd64)

static void
dump_tss(void)
{
	const char tss_fmt[] = "tss.%s:\t0x%p\n";  /* Format string */
	tss_t *tss = CPU->cpu_tss;

	printf(tss_fmt, "tss_rsp0", (void *)tss->tss_rsp0);
	printf(tss_fmt, "tss_rsp1", (void *)tss->tss_rsp1);
	printf(tss_fmt, "tss_rsp2", (void *)tss->tss_rsp2);

	printf(tss_fmt, "tss_ist1", (void *)tss->tss_ist1);
	printf(tss_fmt, "tss_ist2", (void *)tss->tss_ist2);
	printf(tss_fmt, "tss_ist3", (void *)tss->tss_ist3);
	printf(tss_fmt, "tss_ist4", (void *)tss->tss_ist4);
	printf(tss_fmt, "tss_ist5", (void *)tss->tss_ist5);
	printf(tss_fmt, "tss_ist6", (void *)tss->tss_ist6);
	printf(tss_fmt, "tss_ist7", (void *)tss->tss_ist7);
}

#elif defined(__i386)

static void
dump_tss(void)
{
	const char tss_fmt[] = "tss.%s:\t0x%p\n";  /* Format string */
	tss_t *tss = CPU->cpu_tss;

	printf(tss_fmt, "tss_link", (void *)(uintptr_t)tss->tss_link);
	printf(tss_fmt, "tss_esp0", (void *)(uintptr_t)tss->tss_esp0);
	printf(tss_fmt, "tss_ss0", (void *)(uintptr_t)tss->tss_ss0);
	printf(tss_fmt, "tss_esp1", (void *)(uintptr_t)tss->tss_esp1);
	printf(tss_fmt, "tss_ss1", (void *)(uintptr_t)tss->tss_ss1);
	printf(tss_fmt, "tss_esp2", (void *)(uintptr_t)tss->tss_esp2);
	printf(tss_fmt, "tss_ss2", (void *)(uintptr_t)tss->tss_ss2);
	printf(tss_fmt, "tss_cr3", (void *)(uintptr_t)tss->tss_cr3);
	printf(tss_fmt, "tss_eip", (void *)(uintptr_t)tss->tss_eip);
	printf(tss_fmt, "tss_eflags", (void *)(uintptr_t)tss->tss_eflags);
	printf(tss_fmt, "tss_eax", (void *)(uintptr_t)tss->tss_eax);
	printf(tss_fmt, "tss_ebx", (void *)(uintptr_t)tss->tss_ebx);
	printf(tss_fmt, "tss_ecx", (void *)(uintptr_t)tss->tss_ecx);
	printf(tss_fmt, "tss_edx", (void *)(uintptr_t)tss->tss_edx);
	printf(tss_fmt, "tss_esp", (void *)(uintptr_t)tss->tss_esp);
}

#endif	/* __amd64 */
#if defined(TRAPTRACE)

int ttrace_nrec = 10;		/* number of records to dump out */
int ttrace_dump_nregs = 0;	/* dump out this many records with regs too */
/*
 * Dump out the last ttrace_nrec traptrace records on each CPU
 */
static void
dump_ttrace(void)
{
	trap_trace_ctl_t *ttc;
	trap_trace_rec_t *rec;
	uintptr_t current;
	int i, j;
	int n = NCPU;
#if defined(__amd64)
	const char banner[] =
	    "CPU ADDRESS TIMESTAMP TYPE VC HANDLER PC\n";
	/* Define format for the CPU, ADDRESS, and TIMESTAMP fields */
	const char fmt1[] = "%3d %016lx %12llx";
	char data1[34];	/* length of string formatted by fmt1 + 1 */
#elif defined(__i386)
	const char banner[] =
	    "CPU ADDRESS TIMESTAMP TYPE VC HANDLER PC\n";
	/* Define format for the CPU, ADDRESS, and TIMESTAMP fields */
	const char fmt1[] = "%3d %08lx %12llx";
	char data1[26];	/* length of string formatted by fmt1 + 1 */
#endif
	/* Define format for the TYPE and VC fields */
	const char fmt2[] = "%4s %3x";
	char data2[9];	/* length of string formatted by fmt2 + 1 */
	/*
	 * Define format for the HANDLER field. Width is arbitrary, but should
	 * be enough for common handler's names, and leave enough space for
	 * the PC field, especially when we are in kmdb.
	 */
	const char fmt3h[] = "#%-15s";
	const char fmt3p[] = "%-16p";
	const char fmt3s[] = "%-16s";
	char data3[17];	/* length of string formatted by fmt3* + 1 */

	if (ttrace_nrec == 0)
		return;

	printf(banner);

	for (i = 0; i < n; i++) {
		ttc = &trap_trace_ctl[i];
		if (ttc->ttc_first == NULL)
			continue;

		current = ttc->ttc_next - sizeof (trap_trace_rec_t);
		for (j = 0; j < ttrace_nrec; j++) {
			struct sysent *sys;
			struct autovec *vec;
			extern struct av_head autovect[];
			int type;
			ulong_t off;
			char *sym, *stype;

			if (current < ttc->ttc_first)
				current =
				    ttc->ttc_limit - sizeof (trap_trace_rec_t);

			if (current == NULL)
				continue;

			rec = (trap_trace_rec_t *)current;

			if (rec->ttr_stamp == 0)
				break;

			(void) snprintf(data1, sizeof (data1), fmt1, i,
			    (uintptr_t)rec, rec->ttr_stamp);

			switch (rec->ttr_marker) {
			case TT_SYSCALL:
			case TT_SYSENTER:
			case TT_SYSC:
			case TT_SYSC64:
#if defined(__amd64)
				sys = &sysent32[rec->ttr_sysnum];
				switch (rec->ttr_marker) {
				case TT_SYSC64:
					sys = &sysent[rec->ttr_sysnum];
					/*FALLTHROUGH*/
#elif defined(__i386)
				sys = &sysent[rec->ttr_sysnum];
				switch (rec->ttr_marker) {
#endif
				case TT_SYSC:
					stype = "sysc";	/* syscall */
					break;
				case TT_SYSCALL:
					stype = "lcal";	/* lcall */
					break;
				case TT_SYSENTER:
					stype = "syse";	/* sysenter */
					break;
				default:
					stype = "";
					break;
				}
				(void) snprintf(data2, sizeof (data2), fmt2,
				    stype, rec->ttr_sysnum);
				if (sys != NULL) {
					sym = kobj_getsymname(
					    (uintptr_t)sys->sy_callc,
					    &off);
					if (sym != NULL) {
						(void) snprintf(data3,
						    sizeof (data3), fmt3s, sym);
					} else {
						(void) snprintf(data3,
						    sizeof (data3), fmt3p,
						    sys->sy_callc);
					}
				} else {
					(void) snprintf(data3, sizeof (data3),
					    fmt3s, "unknown");
				}
				break;

			case TT_INTERRUPT:
				(void) snprintf(data2, sizeof (data2), fmt2,
				    "intr", rec->ttr_vector);
				if (get_intr_handler != NULL)
					vec = (struct autovec *)
					    (*get_intr_handler)
					    (rec->ttr_cpuid, rec->ttr_vector);
				else
					vec =
					    autovect[rec->ttr_vector].avh_link;

				if (vec != NULL) {
					sym = kobj_getsymname(
					    (uintptr_t)vec->av_vector, &off);
					if (sym != NULL) {
						(void) snprintf(data3,
						    sizeof (data3), fmt3s, sym);
					} else {
						(void) snprintf(data3,
						    sizeof (data3), fmt3p,
						    vec->av_vector);
					}
				} else {
					(void) snprintf(data3, sizeof (data3),
					    fmt3s, "unknown");
				}
				break;

			case TT_TRAP:
				type = rec->ttr_regs.r_trapno;
				(void) snprintf(data2, sizeof (data2), fmt2,
				    "trap", type);
				if (type < TRAP_TYPES) {
					(void) snprintf(data3, sizeof (data3),
					    fmt3h, trap_type_mnemonic[type]);
				} else {
					(void) snprintf(data3,
					    sizeof (data3), fmt3s,
					    "unknown");
				}
				break;

			default:
				(void) snprintf(data3,
				    sizeof (data3), fmt3s, "");
				break;
			}

			sym = kobj_getsymname(rec->ttr_regs.r_pc, &off);
			if (sym != NULL) {
				printf("%s %s %s %s+%lx\n", data1, data2, data3,
				    sym, off);
			} else {
				printf("%s %s %s %lx\n", data1, data2, data3,
				    rec->ttr_regs.r_pc);
			}

			if (ttrace_dump_nregs-- > 0) {
				int s;

				if (rec->ttr_marker == TT_INTERRUPT)
					printf(
					    "\t\tipl %x spl %x pri %x\n",
					    rec->ttr_ipl, rec->ttr_spl,
					    rec->ttr_pri);

				dumpregs(&rec->ttr_regs);

				printf("\t%3s: %p\n\n", " ct",
				    (void *)rec->ttr_curthread);

				/*
				 * print out the pc stack that we recorded
				 * at trap time (if any)
				 */
				for (s = 0; s < rec->ttr_sdepth; s++) {
					uintptr_t fullpc;

					if (s >= TTR_STACK_DEPTH) {
						printf("ttr_sdepth corrupt\n");
						break;
					}

					fullpc = (uintptr_t)rec->ttr_stack[s];

					sym = kobj_getsymname(fullpc, &off);
					if (sym != NULL)
						printf("-> %s+0x%lx()\n",
						    sym, off);
					else
						printf("-> 0x%lx()\n", fullpc);
				}
				printf("\n");
			}
			current -= sizeof (trap_trace_rec_t);
		}
	}
}

#endif	/* TRAPTRACE */
void
panic_showtrap(struct panic_trap_info *tip)
{
	showregs(tip->trap_type, tip->trap_regs, tip->trap_addr);

#if defined(TRAPTRACE)
	dump_ttrace();
#endif

	if (tip->trap_type == T_DBLFLT)
		dump_tss();
}

void
panic_savetrap(panic_data_t *pdp, struct panic_trap_info *tip)
{
	panic_saveregs(pdp, tip->trap_regs);
}