 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Debugger entry for both master and slave CPUs
 */
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>

#include <kdi_assym.h>
/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;		\
	movl	$KRS_SIZE, %ecx;		\
	movl	$kdi_cpusave, %edx;		\
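
/*
 * In C terms, GET_CPUSAVE_ADDR computes roughly the following (a sketch
 * only; kdi_cpusave_t stands for whatever structure KRS_SIZE is the size
 * of, and cpu_id for the value read from %gs:CPU_ID):
 *
 *	kdi_cpusave_t *save = &kdi_cpusave[cpu_id];
 */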
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
	movl	%gs:CPU_IDT, %edx;	\
	cmpl	$kdi_idt, %edx;		\
	movl	%edx, KRS_IDT(%eax);	\
	movl	%gs:CPU_GDT, %edx;	\
	movl	%edx, KRS_GDT(%eax);	\
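
/*
 * A hedged C sketch of the descriptor save above (the field names here
 * are assumptions inferred from the CPU_IDT/CPU_GDT and KRS_IDT/KRS_GDT
 * offsets, not the real declarations):
 *
 *	if (cpu->cpu_idt != kdi_idt) {
 *		save->krs_idt = cpu->cpu_idt;
 *		save->krs_gdt = cpu->cpu_gdt;
 *	}
 */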
/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;			\
	pushl	$KDIREG_DRSTAT_RESERVED;		\
	pushl	DRADDR_OFF(0)(%ebx);			\
	pushl	DRADDR_OFF(1)(%ebx);			\
	pushl	DRADDR_OFF(2)(%ebx);			\
	pushl	DRADDR_OFF(3)(%ebx);			\
	/* Write any requested MSRs. */			\
	movl	KRS_MSR(%edi), %ebx;			\
	movl	MSR_NUM(%ebx), %ecx;			\
	movl	MSR_TYPE(%ebx), %edx;			\
	cmpl	$KDI_MSR_WRITE, %edx;			\
	movl	MSR_VALP(%ebx), %edx;			\
	movl	0(%edx), %eax;				\
	movl	4(%edx), %edx;				\
	addl	$MSR_SIZE, %ebx;			\
	/*						\
	 * We must not branch after re-enabling LBR.  If \
	 * kdi_msr_wrexit_msr is set, it contains the	\
	 * number of the MSR that controls LBR.		\
	 * kdi_msr_wrexit_valp contains the value that	\
	 * is to be written to enable LBR.		\
	 */						\
	movl	kdi_msr_wrexit_msr, %ecx;		\
	movl	kdi_msr_wrexit_valp, %edx;		\
	movl	0(%edx), %eax;				\
	movl	4(%edx), %edx;				\
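
/*
 * The MSR write pass above behaves roughly like this C sketch (the list
 * walk and terminator test are elided in this excerpt; msr_num, msr_type,
 * and msr_valp are names inferred from the MSR_NUM/MSR_TYPE/MSR_VALP
 * offsets):
 *
 *	for (msr = save->krs_msr; ...; msr++) {
 *		if (msr->msr_type == KDI_MSR_WRITE)
 *			wrmsr(msr->msr_num, *msr->msr_valp);
 *	}
 *
 * followed by the final kdi_msr_wrexit write that re-enables LBR with no
 * further branches taken.
 */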
#define	KDI_RESTORE_REGS() \
	/* Discard savfp and savpc */ \
	/* Discard trapno and err */ \
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;		\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;		\
	/* Advance the pointer and index */		\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);		\
	movl	KRS_CURCRUMB(cpusave), tmp1;		\
	addl	$KRM_SIZE, tmp1;			\
1:	/* Reset the pointer and index */		\
	movw	$0, KRS_CURCRUMBIDX(cpusave);		\
	leal	KRS_CRUMBS(cpusave), tmp1;		\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);		\
	/* Clear the new crumb */			\
	movl	$KDI_NCRUMBS, tmp2;			\
3:	movl	$0, -4(tmp1, tmp2, 4);			\
/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;		\
	movl	value, offset(tmp)
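
/*
 * Sketched in C, the two crumb macros amount to the following (kdi_crumb_t
 * and the krs_ field names are assumptions based on the KRS_/KRM_ offsets
 * used above):
 *
 *	if (save->krs_curcrumbidx == KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx = 0;
 *		save->krs_curcrumb = &save->krs_crumbs[0];
 *	} else {
 *		save->krs_curcrumbidx++;
 *		save->krs_curcrumb = (kdi_crumb_t *)
 *		    ((caddr_t)save->krs_curcrumb + KRM_SIZE);
 *	}
 *	bzero(save->krs_curcrumb, KRM_SIZE);
 *
 * and, for ADD_CRUMB, a single field store into the current crumb:
 *
 *	*(greg_t *)((caddr_t)save->krs_curcrumb + offset) = value;
 */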
/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */

	/* XXX implement me */
	ALTENTRY(kdi_master_entry)

	/* Save all registers and selectors */

	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	jne	1f			/* The kernel hasn't switched yet */

	je	1f			/* We already switched */
	/*
	 * The kernel switched, but we haven't.  Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs
	/*
	 * Set the selectors to a known state.  If we come in from kmdb's IDT,
	 * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The
	 * other selectors are restored normally.
	 */
	/*
	 * This has to come after we set %gs to the kernel descriptor.  Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the machcpu.
	 */
	GET_CPUSAVE_ADDR		/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)
	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

	jz	kdi_save_common_state	/* %esp not within debugger memory */
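
	/*
	 * The scan above is the assembly form of this C sketch (mr_base and
	 * mr_lim are names implied by the MR_BASE/MR_LIM offsets; the loop
	 * decrement and range advance are elided in this excerpt):
	 *
	 *	for (i = 0; i < kdi_nmemranges; i++) {
	 *		if (sp >= kdi_memranges[i].mr_base &&
	 *		    sp <= kdi_memranges[i].mr_lim)
	 *			goto in_debugger_memory;
	 *	}
	 *	goto kdi_save_common_state;
	 */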
	/*
	 * %esp was within one of the debugger's memory ranges.  This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */
	pushl	%ebx			/* cpuid */

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state
	SET_SIZE(kdi_master_entry)
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  There's an %eip at %esp, and that's about it.  We want to
	 * make it look like the master CPU's stack.  By doing this, we can
	 * use the same resume code for both master and slave.  We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */
	pushl	$-1		/* A phony trap error code */
	pushl	$-1		/* A phony trap number */
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
	/*
	 * Swap our saved EFLAGS and %eip.  Each is where the other
	 * should be.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)
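
	/*
	 * The movl/xchgl/movl sequence above swaps the two stack slots using
	 * only %eax as scratch; in C terms (the regs[] indexing here is
	 * illustrative only):
	 *
	 *	tmp = regs[KDIREG_EFLAGS];
	 *	regs[KDIREG_EFLAGS] = regs[KDIREG_EIP];
	 *	regs[KDIREG_EIP] = tmp;
	 */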
	/*
	 * Our stack now matches struct regs, and is irettable.  We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */

	/* Load sanitized segment selectors */
	GET_CPUSAVE_ADDR		/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	jmp	kdi_save_common_state
	SET_SIZE(kdi_slave_entry)
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS.  It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers.  We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted.  Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
	ENTRY_NP(kdi_save_common_state)

	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), KDIREG_OFF(KDIREG_ESP)(%esp)

	andl	$0xffff, KDIREG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_DS)(%esp)
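
	/*
	 * In C terms, the fixups above are (a sketch; gregs stands for the
	 * saved register array that KRS_GREGS now points to):
	 *
	 *	gregs[KDIREG_ESP] += REG_OFF(KDIREG_EFLAGS - KDIREG_EAX);
	 *	gregs[KDIREG_SS] &= 0xffff;
	 *	gregs[KDIREG_GS] &= 0xffff;
	 *	gregs[KDIREG_FS] &= 0xffff;
	 *	gregs[KDIREG_ES] &= 0xffff;
	 *	gregs[KDIREG_DS] &= 0xffff;
	 */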
	je	kdi_pass_to_kernel

	/* Save off %cr0, and clear write protect */
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	/* Save the debug registers and disable any active watchpoints */

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	movl	%eax, KRS_DRSTAT(%edi)

	movl	%eax, KRS_DROFF(0)(%edi)

	movl	%eax, KRS_DROFF(1)(%edi)

	movl	%eax, KRS_DROFF(2)(%edi)

	movl	%eax, KRS_DROFF(3)(%edi)
	/*
	 * Save any requested MSRs.
	 */
	movl	KRS_MSR(%eax), %ecx

	pushl	%eax		/* rdmsr clobbers %eax */

	movl	MSR_NUM(%ebx), %ecx

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)
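
	/*
	 * A C sketch of the MSR save pass (rdmsr leaves the 64-bit value in
	 * %edx:%eax, which the two stores above record as the low and high
	 * halves of msr_val; field names are inferred from the offsets, and
	 * the list walk is elided in this excerpt):
	 *
	 *	for (msr = save->krs_msr; ...; msr++) {
	 *		if (msr->msr_type == KDI_MSR_READ)
	 *			msr->msr_val = rdmsr(msr->msr_num);
	 *	}
	 */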
	clr	%ebp		/* stack traces should end here */

	call	kdi_debugger_entry

	SET_SIZE(kdi_save_common_state)
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */

	/* cpusave in %eax */

	/*
	 * Send this CPU back into the world
	 */
	movl	KRS_CR0(%eax), %edx

	KDI_RESTORE_DEBUGGING_STATE

	addl	$8, %esp	/* Discard savfp and savpc */

	addl	$8, %esp	/* Discard TRAPNO and ERROR */
	ENTRY_NP(kdi_pass_to_kernel)

	/* pop cpusave, leaving %esp pointing to saved regs */

	movl	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)
	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %eip being
	 * the last entry, so we'll need to restore all our regs.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we restore, we don't have registers to work
	 * with; we can't use a global since other CPUs can easily pass through
	 * here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %eax

	cmpl	$T_DBGENTR, %eax
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */

	SET_SIZE(kdi_pass_to_kernel)
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */

	ENTRY_NP(kdi_cpu_debug_init)

	KDI_RESTORE_DEBUGGING_STATE

	SET_SIZE(kdi_cpu_debug_init)