/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Debugger entry for both master and slave CPUs
 */
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>

#include <kdi_assym.h>
/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	addq	(%rdx), %rax
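/*
 * In C terms, the macro above computes (a sketch; CPU_ID and KRS_SIZE are
 * assym-generated constants):  %rax = kdi_cpusave + CPU_ID * KRS_SIZE,
 * i.e. the address of this CPU's save area.  mulq widens its result into
 * %rdx:%rax, which is why %rdx is listed among the clobbered registers.
 */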
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:
#ifdef __xpv

#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
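/*
 * rdmsr and wrmsr move the 64-bit MSR value through the %edx:%eax pair
 * (high 32 bits in %edx, low 32 in %eax), with the MSR number in %ecx.
 * The shlq/orq and movq/shrq sequences above simply split and rejoin the
 * 64-bit GSBASE value around that convention.
 */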
/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary there, since the hypervisor keeps our kernel GSBASE in place.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)
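/*
 * The stores above follow the kdi_regs.h layout: REG_OFF(KDIREG_xxx) is an
 * assym-generated byte offset into the register save area, so "base" can be
 * any register holding the bottom of that area (%rsp at the entry points).
 * SAVFP and SAVPC duplicate %rbp and the trap %rip for the benefit of stack
 * tracing across the debugger entry frame.
 */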
#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
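/*
 * Ordering matters here: RESTORE_GSBASE and the segment reloads clobber
 * %rax, %rcx, and %rdx, so they run before those registers are restored,
 * and %rdi doubles as the base pointer, so it is reloaded last.
 */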
/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi,
 * and %r15.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
								\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;						\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movq	KRS_MSR(%rdi), %rbx;				\
	cmpq	$0, %rbx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%rbx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%rbx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movq	MSR_VALP(%rbx), %rdx;				\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
	wrmsr;							\
2:								\
	addq	$MSR_SIZE, %rbx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	leaq	kdi_msr_wrexit_msr(%rip), %rcx;			\
	movl	(%rcx), %ecx;					\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	leaq	kdi_msr_wrexit_valp(%rip), %rdx;		\
	movq	(%rdx), %rdx;					\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
								\
	wrmsr;							\
1:
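/*
 * The MSR list hanging off KRS_MSR is a flat array of records -- MSR_NUM
 * (the MSR number; zero ends the list), MSR_TYPE (read vs. write), and
 * MSR_VALP/MSR_VAL (where the 64-bit value lives, again split into two
 * 32-bit halves for rdmsr/wrmsr).  The loop above walks it in MSR_SIZE
 * strides and performs only the KDI_MSR_WRITE entries.
 */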
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */
/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b
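/*
 * The clear loop stores a zero quadword at -4(tmp1, tmp2, 4) as tmp2 counts
 * down from KDI_NCRUMBS, wiping the new crumb in overlapping 4-byte steps;
 * this covers the whole crumb only because KRM_SIZE and KDI_NCRUMBS * 4 are
 * presumed to line up in the assym-generated constants.
 */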
/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)
	/* XXX implement me */
/*
 * The main entry point for master CPUs.  It also serves as the trap
 * handler for all traps and interrupts taken during single-step.
 */
	ALTENTRY(kdi_master_entry)

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)
#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.  On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 */
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
#endif	/* __xpv */
	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b
3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */
	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault
	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state
	SET_SIZE(kdi_master_entry)
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */
	.globl	kdi_slave_entry_patch;
	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call.  That
	 * is, there's the return %rip at %rsp, and that's about it.  We need
	 * to make it look like an interrupt stack.  When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers.  We'll also need to fix up
	 * the saved %rsp.
	 */
	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */
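	/*
	 * The stack now looks like a hardware interrupt frame -- %ss, %rsp,
	 * %rflags, %cs, %rip, err, trapno -- except that the cross-call's
	 * return %rip is still sitting in the %ss slot and %ss in the %rip
	 * slot; the xchgq below swaps them back once registers are free.
	 */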
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)
	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
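	/*
	 * The addq $8 undoes the "pushed value off by 8" above: when %rsp
	 * was pushed, the cross-call's return address was still on the
	 * stack, so the interrupted thread's real stack pointer is 8 bytes
	 * higher than the value we saved.
	 */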
	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */
	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */
	ENTRY_NP(kdi_save_common_state)
	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT
#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif
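	/*
	 * With CR0.WP clear, supervisor-mode writes ignore page-level write
	 * protection; the debugger relies on this to plant breakpoints in
	 * otherwise read-only kernel text.  The saved value is put back by
	 * kdi_resume.
	 */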
	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */
	/*
	 * Save any requested MSRs.
	 */
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax		/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax

no_msr:
	pushq	%rax
	clrq	%rbp		/* stack traces should end here */

	movq	%rax, %rdi	/* cpusave */
	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)
	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi		/* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs.  On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	name
1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/
	SET_SIZE(kdi_pass_to_kernel)
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
	ENTRY_NP(kdi_reboot)

	movl	$A_SHUTDOWN, %esi
	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret
	SET_SIZE(kdi_cpu_debug_init)