arch/x86/kernel/kdi/kdi_asm_64.s
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Debugger entry for both master and slave CPUs
 */
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>
/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;	\
	movq	%rbx, %rax;		\
	movq	$KRS_SIZE, %rcx;	\
	mulq	%rcx;			\
	movq	$kdi_cpusave, %rdx;	\
	/*CSTYLED*/			\
	addq	(%rdx), %rax
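
/*
 * Illustrative sketch only: the macro above computes, in rough C terms,
 * rax = (uintptr_t)kdi_cpusave + cpuid * KRS_SIZE, where kdi_cpusave holds
 * the base address of the per-CPU save areas; i.e. it returns the address of
 * this CPU's cpusave entry (field/offset names per the assym values used
 * above).
 */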
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movq	%gs:CPU_IDT, %r11;	\
	leaq	kdi_idt(%rip), %rsi;	\
	cmpq	%rsi, %r11;		\
	je	1f;			\
	movq	%r11, KRS_IDT(%rax);	\
	movq	%gs:CPU_GDT, %r11;	\
	movq	%r11, KRS_GDT(%rax);	\
1:
#ifdef __xpv

#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif	/* __xpv */
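
/*
 * Descriptive note: rdmsr returns the 64-bit MSR value split across %edx:%eax
 * (high:low), and wrmsr consumes it the same way, with the MSR number in
 * %ecx; hence the shlq/orq packing on save and the shrq split on restore
 * above.
 */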
/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)
#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
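
/*
 * Descriptive note: the saved register image is walked through %rdi, so %rdi
 * itself is reloaded last, after every other register has been restored from
 * the save area.
 */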
/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi,
 * %rbx and %r15 (via the kdi_dreg_set() calls and the MSR walk below).
 */
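
/*
 * Illustrative sketch of the macro below, in rough C terms (the struct and
 * field names here follow the assym offsets used in the assembly, not
 * necessarily the real C declarations):
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);
 *	for (i = 0; i < 4; i++)
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 *	for (msr = cpusave->krs_msr; msr != NULL && msr->num != 0; msr++)
 *		if (msr->type == KDI_MSR_WRITE)
 *			wrmsr(msr->num, *msr->valp);
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);
 */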
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;						\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movq	KRS_MSR(%rdi), %rbx;				\
	cmpq	$0, %rbx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%rbx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%rbx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movq	MSR_VALP(%rbx), %rdx;				\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
	wrmsr;							\
2:								\
	addq	$MSR_SIZE, %rbx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	leaq	kdi_msr_wrexit_msr(%rip), %rcx;			\
	movl	(%rcx), %ecx;					\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	leaq	kdi_msr_wrexit_valp(%rip), %rdx;		\
	movq	(%rdx), %rdx;					\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
								\
	wrmsr;							\
								\
1:
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b
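
/*
 * Illustrative sketch of the macro above (field names follow the assym
 * offsets, not necessarily the real C declarations):
 *
 *	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cpusave->krs_curcrumbidx++;
 *		crumb = (char *)cpusave->krs_curcrumb + KRM_SIZE;
 *	} else {
 *		cpusave->krs_curcrumbidx = 0;
 *		crumb = cpusave->krs_crumbs;
 *	}
 *	cpusave->krs_curcrumb = crumb;
 *	(then the newly selected crumb is zeroed)
 */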
/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)

#endif	/* _ASM */
	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)
/*
 * The main entry point for master CPUs.  It also serves as the trap
 * handler for all traps and interrupts taken during single-step.
 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the
	 * correct value for us.  Note that the previous GSBASE is saved in
	 * the KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from
	 * being blown away.  On the hypervisor, we don't need to do this,
	 * since it's ensured we're on our requested kernel GSBASE already.
	 */
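	/*
	 * Descriptive note: sgdt stores a 10-byte pseudo-descriptor -- a
	 * 16-bit limit at offset 0 followed by the 64-bit GDT base at offset
	 * 2 -- which is why the base is fetched with movq 2(%rsp) below and
	 * the stack is adjusted by 10 bytes around it.
	 */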
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax
	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b
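
	/*
	 * Illustrative sketch of the loop above (names per the assym
	 * offsets):
	 *
	 *	for (mr = kdi_memranges, i = kdi_nmemranges; ; mr++) {
	 *		if (rsp >= mr->base && rsp <= mr->lim)
	 *			break;	(we faulted inside the debugger)
	 *		if (--i == 0)
	 *			goto kdi_save_common_state;
	 *	}
	 */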
3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;
	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  That is, there's the return %rip at %rsp, and that's about
	 * it.  We need to make it look like an interrupt stack.  When we
	 * first save, we'll reverse the saved %ss and %rip, which we'll fix
	 * back up when we've freed up some general-purpose registers.  We'll
	 * also need to fix up the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
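
	/*
	 * Descriptive note: the cross-call's return %rip already occupies the
	 * stack slot where an interrupt frame keeps %ss, so %ss was pushed
	 * into the %rip slot instead (per the comment above).  The xchgq
	 * sequence swaps the two values back into their proper KDIREG slots,
	 * and the addq $8 corrects the saved %rsp for the return address the
	 * cross-call pushed.
	 */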
	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */
	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
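
	/*
	 * Descriptive note: kdi_trap_pass() is called with the cpusave area
	 * as its argument (also pushed on the stack so it survives the call)
	 * and, per the comment above, returns 1 when the trap should be
	 * handed back to the kernel's own handlers rather than entering the
	 * debugger.
	 */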
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT

	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
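
	/*
	 * Descriptive note: with CR0.WP clear, supervisor-mode writes ignore
	 * page-level write protection, which (presumably) is what lets the
	 * debugger patch otherwise read-only kernel text, e.g. to plant
	 * breakpoints.
	 */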
	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax		/* restore cpu save area to rax */

	/*
	 * Save any requested MSRs.
	 */
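	/*
	 * Illustrative sketch of the loop below (field names follow the
	 * assym offsets, not necessarily the real C declarations):
	 *
	 *	for (msr = cpusave->krs_msr; msr != NULL && msr->num != 0;
	 *	    msr++) {
	 *		if (msr->type == KDI_MSR_READ)
	 *			msr->val = rdmsr(msr->num);
	 *	}
	 */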
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax		/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:
	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax

no_msr:
	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */

	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)
	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi		/* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs.  On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f

	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name
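
/*
 * Descriptive note: XPV_TRAP_PUSH is the compensation for XPV_TRAP_POP
 * mentioned in the comment above; on a non-hypervisor (non-i86xpv) build it
 * should expand to nothing.
 */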
1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
	call	reset
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)
	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret
	SET_SIZE(kdi_cpu_debug_init)