 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#ifdef CONFIG_USER_ONLY
#include "monitor/monitor.h"
#include "qemu-char.h"
#include "exec/gdbstub.h"

#define MAX_PACKET_LENGTH 4096

#include "qemu_socket.h"
#ifndef TARGET_CPU_MEMORY_RW_DEBUG
static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
                                         uint8_t *buf, int len, int is_write)
{
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
#else
/* target_memory_rw_debug() defined in cpu.h */
#endif
enum {
    GDB_SIGNAL_0 = 0,
    GDB_SIGNAL_INT = 2,
    GDB_SIGNAL_QUIT = 3,
    GDB_SIGNAL_TRAP = 5,
    GDB_SIGNAL_ABRT = 6,
    GDB_SIGNAL_ALRM = 14,
    GDB_SIGNAL_IO = 23,
    GDB_SIGNAL_XCPU = 24,
    GDB_SIGNAL_UNKNOWN = 143
};

#ifdef CONFIG_USER_ONLY

/* Map target signal numbers to GDB protocol signal numbers and vice
 * versa.  For user emulation's currently supported systems, we can
 * assume most signals are defined.
 */
static int gdb_signal_table[] = {
    /* per-signal entries elided */
};

#else

/* In system mode we only need SIGINT and SIGTRAP; other signals
   are not yet supported.  */
static int gdb_signal_table[] = {
    /* entries elided */
};
#endif
#ifdef CONFIG_USER_ONLY
static int target_signal_to_gdb (int sig)
{
    int i;

    for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
        if (gdb_signal_table[i] == sig)
            return i;
    return GDB_SIGNAL_UNKNOWN;
}
#endif

static int gdb_signal_to_target (int sig)
{
    if (sig < ARRAY_SIZE (gdb_signal_table))
        return gdb_signal_table[sig];
    else
        return -1;
}
typedef struct GDBRegisterState {
    int base_reg;
    int num_regs;
    gdb_reg_cb get_reg;
    gdb_reg_cb set_reg;
    const char *xml;
    struct GDBRegisterState *next;
} GDBRegisterState;

enum RSState {
    RS_INACTIVE,
    RS_IDLE,
    RS_GETLINE,
    RS_CHKSUM1,
    RS_CHKSUM2,
};

typedef struct GDBState {
    CPUArchState *c_cpu; /* current CPU for step/continue ops */
    CPUArchState *g_cpu; /* current CPU for other ops */
    CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
    enum RSState state; /* parsing state */
    char line_buf[MAX_PACKET_LENGTH];
    int line_buf_index;
    int line_csum;
    uint8_t last_packet[MAX_PACKET_LENGTH + 4];
    int last_packet_len;
    int signal;
#ifdef CONFIG_USER_ONLY
    int fd;
    int running_state;
#else
    CharDriverState *chr;
    CharDriverState *mon_chr;
#endif
    char syscall_buf[256];
    gdb_syscall_complete_cb current_syscall_cb;
} GDBState;
/* By default use no IRQs and no timers while single stepping so as to
 * make single stepping like an ICE HW step.
 */
static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;

static GDBState *gdbserver_state;

/* This is an ugly hack to cope with both new and old gdb.
   If gdb sends qXfer:features:read then assume we're talking to a newish
   gdb that understands target descriptions.  */
static int gdb_has_xml;

#ifdef CONFIG_USER_ONLY
/* XXX: This is not thread safe.  Do we care?  */
static int gdbserver_fd = -1;
static int get_char(GDBState *s)
{
    uint8_t ch;
    int ret;

    for(;;) {
        ret = qemu_recv(s->fd, &ch, 1, 0);
        if (ret < 0) {
            if (errno == ECONNRESET)
                s->fd = -1;
            if (errno != EINTR && errno != EAGAIN)
                return -1;
        } else if (ret == 0) {
            close(s->fd);
            s->fd = -1;
            return -1;
        } else {
            break;
        }
    }
    return ch;
}
/* If gdb is connected when the first semihosting syscall occurs then use
   remote gdb syscalls.  Otherwise use native file IO.  */
int use_gdb_syscalls(void)
{
    if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
        gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
                                            : GDB_SYS_DISABLED);
    }
    return gdb_syscall_mode == GDB_SYS_ENABLED;
}

/* Resume execution.  */
static inline void gdb_continue(GDBState *s)
{
#ifdef CONFIG_USER_ONLY
    s->running_state = 1;
#endif
}
static void put_buffer(GDBState *s, const uint8_t *buf, int len)
{
#ifdef CONFIG_USER_ONLY
    int ret;

    while (len > 0) {
        ret = send(s->fd, buf, len, 0);
        if (ret < 0) {
            if (errno != EINTR && errno != EAGAIN)
                return;
        } else {
            buf += ret;
            len -= ret;
        }
    }
#else
    qemu_chr_fe_write(s->chr, buf, len);
#endif
}
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}

static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}

static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;

    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}

static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
/* return -1 if error, 0 if OK */
static int put_packet_binary(GDBState *s, const char *buf, int len)
{
    int csum, i;
    uint8_t *p;

    for(;;) {
        p = s->last_packet;
        *(p++) = '$';
        memcpy(p, buf, len);
        p += len;
        csum = 0;
        for(i = 0; i < len; i++) {
            csum += buf[i];
        }
        *(p++) = '#';
        *(p++) = tohex((csum >> 4) & 0xf);
        *(p++) = tohex((csum) & 0xf);

        s->last_packet_len = p - s->last_packet;
        put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);

#ifdef CONFIG_USER_ONLY
        i = get_char(s);
        if (i < 0)
            return -1;
        if (i == '+')
            break;
#else
        break;
#endif
    }
    return 0;
}

/* return -1 if error, 0 if OK */
static int put_packet(GDBState *s, const char *buf)
{
#ifdef DEBUG_GDB
    printf("reply='%s'\n", buf);
#endif

    return put_packet_binary(s, buf, strlen(buf));
}
/* The GDB remote protocol transfers values in target byte order.  This means
   we can use the raw memory access routines to access the value buffer.
   Conveniently, these also handle the case where the buffer is mis-aligned.
 */
#define GET_REG8(val) do { \
    stb_p(mem_buf, val); \
    return 1; \
    } while(0)
#define GET_REG16(val) do { \
    stw_p(mem_buf, val); \
    return 2; \
    } while(0)
#define GET_REG32(val) do { \
    stl_p(mem_buf, val); \
    return 4; \
    } while(0)
#define GET_REG64(val) do { \
    stq_p(mem_buf, val); \
    return 8; \
    } while(0)

#if TARGET_LONG_BITS == 64
#define GET_REGL(val) GET_REG64(val)
#define ldtul_p(addr) ldq_p(addr)
#else
#define GET_REGL(val) GET_REG32(val)
#define ldtul_p(addr) ldl_p(addr)
#endif
#if defined(TARGET_I386)

#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

#define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)

#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + 1)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + 1)
#define IDX_FP_REGS     (IDX_SEG_REGS + 6)
#define IDX_XMM_REGS    (IDX_FP_REGS + 16)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
{
    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            GET_REG64(env->regs[gpr_map[n]]);
        } else if (n < CPU_NB_REGS32) {
            GET_REG32(env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                GET_REG64(env->eip);
            } else {
                GET_REG32(env->eip);
            }
        case IDX_FLAGS_REG: GET_REG32(env->eflags);

        case IDX_SEG_REGS:     GET_REG32(env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);

        case IDX_FP_REGS + 8:  GET_REG32(env->fpuc);
        case IDX_FP_REGS + 9:  GET_REG32((env->fpus & ~0x3800) |
                                         (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
        case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
        case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
        case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
        case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
        case IDX_FP_REGS + 15: GET_REG32(0); /* fop */

        case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
        }
    }
    return 0;
}

static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
{
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            base = selector << 4;
            limit = 0xffff;
            flags = 0;
        } else {
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
                return 4;
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}

static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                env->eip = ldq_p(mem_buf);
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:     return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
        case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
        case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
        case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
        case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
        case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);

        case IDX_FP_REGS + 8:
            env->fpuc = ldl_p(mem_buf);
            return 4;
        case IDX_FP_REGS + 9:
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */  return 4;
        case IDX_FP_REGS + 11: /* fiseg */ return 4;
        case IDX_FP_REGS + 12: /* fioff */ return 4;
        case IDX_FP_REGS + 13: /* foseg */ return 4;
        case IDX_FP_REGS + 14: /* fooff */ return 4;
        case IDX_FP_REGS + 15: /* fop */   return 4;

        case IDX_MXCSR_REG:
            env->mxcsr = ldl_p(mem_buf);
            return 4;
        }
    }
    /* Unrecognised register.  */
    return 0;
}
#elif defined (TARGET_PPC)

/* Old gdb always expects FP registers.  Newer (xml-aware) gdb only
   expects whatever the target description contains.  Due to a
   historical mishap the FP registers appear in between core integer
   regs and PC, MSR, CR, and so forth.  We hack round this by giving the
   FP regs zero size when talking to a newer gdb.  */
#define NUM_CORE_REGS 71
#if defined (TARGET_PPC64)
#define GDB_CORE_XML "power64-core.xml"
#else
#define GDB_CORE_XML "power-core.xml"
#endif

static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        /* gprs */
        GET_REGL(env->gpr[n]);
    } else if (n < 64) {
        /* fprs */
        if (gdb_has_xml)
            return 0;
        stfq_p(mem_buf, env->fpr[n-32]);
        return 8;
    } else {
        switch (n) {
        case 64: GET_REGL(env->nip);
        case 65: GET_REGL(env->msr);
        case 66:
            {
                uint32_t cr = 0;
                int i;
                for (i = 0; i < 8; i++)
                    cr |= env->crf[i] << (32 - ((i + 1) * 4));
                GET_REG32(cr);
            }
        case 67: GET_REGL(env->lr);
        case 68: GET_REGL(env->ctr);
        case 69: GET_REGL(env->xer);
        case 70:
            {
                if (gdb_has_xml)
                    return 0;
                GET_REG32(env->fpscr);
            }
        }
    }
    return 0;
}

static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        /* gprs */
        env->gpr[n] = ldtul_p(mem_buf);
        return sizeof(target_ulong);
    } else if (n < 64) {
        /* fprs */
        if (gdb_has_xml)
            return 0;
        env->fpr[n-32] = ldfq_p(mem_buf);
        return 8;
    } else {
        switch (n) {
        case 64:
            env->nip = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 65:
            ppc_store_msr(env, ldtul_p(mem_buf));
            return sizeof(target_ulong);
        case 66:
            {
                uint32_t cr = ldl_p(mem_buf);
                int i;
                for (i = 0; i < 8; i++)
                    env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
                return 4;
            }
        case 67:
            env->lr = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 68:
            env->ctr = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 69:
            env->xer = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        }
    }
    return 0;
}
#elif defined (TARGET_SPARC)

#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
#define NUM_CORE_REGS 86
#else
#define NUM_CORE_REGS 72
#endif

#ifdef TARGET_ABI32
#define GET_REGA(val) GET_REG32(val)
#else
#define GET_REGA(val) GET_REGL(val)
#endif

static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
{
    if (n < 8) {
        /* g0..g7 */
        GET_REGA(env->gregs[n]);
    }
    if (n < 32) {
        /* register window */
        GET_REGA(env->regwptr[n - 8]);
    }
#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
    if (n < 64) {
        /* fprs */
        if (n & 1) {
            GET_REG32(env->fpr[(n - 32) / 2].l.lower);
        } else {
            GET_REG32(env->fpr[(n - 32) / 2].l.upper);
        }
    }
    /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
    switch (n) {
    case 64: GET_REGA(env->y);
    case 65: GET_REGA(cpu_get_psr(env));
    case 66: GET_REGA(env->wim);
    case 67: GET_REGA(env->tbr);
    case 68: GET_REGA(env->pc);
    case 69: GET_REGA(env->npc);
    case 70: GET_REGA(env->fsr);
    case 71: GET_REGA(0); /* csr */
    default: GET_REGA(0);
    }
#else
    if (n < 64) {
        /* f0-f31 */
        if (n & 1) {
            GET_REG32(env->fpr[(n - 32) / 2].l.lower);
        } else {
            GET_REG32(env->fpr[(n - 32) / 2].l.upper);
        }
    }
    if (n < 80) {
        /* f32-f62 (double width, even numbers only) */
        GET_REG64(env->fpr[(n - 32) / 2].ll);
    }
    switch (n) {
    case 80: GET_REGL(env->pc);
    case 81: GET_REGL(env->npc);
    case 82: GET_REGL((cpu_get_ccr(env) << 32) |
                      ((env->asi & 0xff) << 24) |
                      ((env->pstate & 0xfff) << 8) |
                      cpu_get_cwp64(env));
    case 83: GET_REGL(env->fsr);
    case 84: GET_REGL(env->fprs);
    case 85: GET_REGL(env->y);
    }
#endif
    return 0;
}

static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
{
#if defined(TARGET_ABI32)
    abi_ulong tmp;

    tmp = ldl_p(mem_buf);
#else
    target_ulong tmp;

    tmp = ldtul_p(mem_buf);
#endif

    if (n < 8) {
        /* g0..g7 */
        env->gregs[n] = tmp;
    } else if (n < 32) {
        /* register window */
        env->regwptr[n - 8] = tmp;
    }
#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
    else if (n < 64) {
        /* fprs */
        /* f0-f31 */
        if (n & 1) {
            env->fpr[(n - 32) / 2].l.lower = tmp;
        } else {
            env->fpr[(n - 32) / 2].l.upper = tmp;
        }
    } else {
        /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
        switch (n) {
        case 64: env->y = tmp; break;
        case 65: cpu_put_psr(env, tmp); break;
        case 66: env->wim = tmp; break;
        case 67: env->tbr = tmp; break;
        case 68: env->pc = tmp; break;
        case 69: env->npc = tmp; break;
        case 70: env->fsr = tmp; break;
        default: return 0;
        }
    }
    return 4;
#else
    else if (n < 64) {
        /* f0-f31 */
        tmp = ldl_p(mem_buf);
        if (n & 1) {
            env->fpr[(n - 32) / 2].l.lower = tmp;
        } else {
            env->fpr[(n - 32) / 2].l.upper = tmp;
        }
        return 4;
    } else if (n < 80) {
        /* f32-f62 (double width, even numbers only) */
        env->fpr[(n - 32) / 2].ll = tmp;
    } else {
        switch (n) {
        case 80: env->pc = tmp; break;
        case 81: env->npc = tmp; break;
        case 82:
            cpu_put_ccr(env, tmp >> 32);
            env->asi = (tmp >> 24) & 0xff;
            env->pstate = (tmp >> 8) & 0xfff;
            cpu_put_cwp64(env, tmp & 0xff);
            break;
        case 83: env->fsr = tmp; break;
        case 84: env->fprs = tmp; break;
        case 85: env->y = tmp; break;
        default: return 0;
        }
    }
    return 8;
#endif
}
#elif defined (TARGET_ARM)

/* Old gdb always expects FPA registers.  Newer (xml-aware) gdb only expects
   whatever the target description contains.  Due to a historical mishap
   the FPA registers appear in between core integer regs and the CPSR.
   We hack round this by giving the FPA regs zero size when talking to a
   newer gdb.  */
#define NUM_CORE_REGS 26
#define GDB_CORE_XML "arm-core.xml"

static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
{
    if (n < 16) {
        /* Core integer register.  */
        GET_REG32(env->regs[n]);
    }
    if (n < 24) {
        /* FPA registers.  */
        if (gdb_has_xml)
            return 0;
        memset(mem_buf, 0, 12);
        return 12;
    }
    switch (n) {
    case 24:
        /* FPA status register.  */
        if (gdb_has_xml)
            return 0;
        GET_REG32(0);
    case 25:
        /* CPSR */
        GET_REG32(cpsr_read(env));
    }
    /* Unknown register.  */
    return 0;
}

static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    /* Mask out low bit of PC to workaround gdb bugs.  This will probably
       cause problems if we ever implement the Jazelle DBX extensions.  */
    if (n == 15)
        tmp &= ~1;

    if (n < 16) {
        /* Core integer register.  */
        env->regs[n] = tmp;
        return 4;
    }
    if (n < 24) { /* 16-23 */
        /* FPA registers (ignored).  */
        if (gdb_has_xml)
            return 0;
        return 12;
    }
    switch (n) {
    case 24:
        /* FPA status register (ignored).  */
        if (gdb_has_xml)
            return 0;
        return 4;
    case 25:
        /* CPSR */
        cpsr_write (env, tmp, 0xffffffff);
        return 4;
    }
    /* Unknown register.  */
    return 0;
}
#elif defined (TARGET_M68K)

#define NUM_CORE_REGS 18

#define GDB_CORE_XML "cf-core.xml"

static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
{
    if (n < 8) {
        /* D0-D7 */
        GET_REG32(env->dregs[n]);
    } else if (n < 16) {
        /* A0-A7 */
        GET_REG32(env->aregs[n - 8]);
    } else {
        switch (n) {
        case 16: GET_REG32(env->sr);
        case 17: GET_REG32(env->pc);
        }
    }
    /* FP registers not included here because they vary between
       ColdFire and m68k.  Use XML bits for these.  */
    return 0;
}

static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    if (n < 8) {
        /* D0-D7 */
        env->dregs[n] = tmp;
    } else if (n < 16) {
        /* A0-A7 */
        env->aregs[n - 8] = tmp;
    } else {
        switch (n) {
        case 16: env->sr = tmp; break;
        case 17: env->pc = tmp; break;
        default: return 0;
        }
    }
    return 4;
}
#elif defined (TARGET_MIPS)

#define NUM_CORE_REGS 73

static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REGL(env->active_tc.gpr[n]);
    }
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        if (n >= 38 && n < 70) {
            if (env->CP0_Status & (1 << CP0St_FR))
                GET_REGL(env->active_fpu.fpr[n - 38].d);
            else
                GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
        }
        switch (n) {
        case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
        case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
        }
    }
    switch (n) {
    case 32: GET_REGL((int32_t)env->CP0_Status);
    case 33: GET_REGL(env->active_tc.LO[0]);
    case 34: GET_REGL(env->active_tc.HI[0]);
    case 35: GET_REGL(env->CP0_BadVAddr);
    case 36: GET_REGL((int32_t)env->CP0_Cause);
    case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
    case 72: GET_REGL(0); /* fp */
    case 89: GET_REGL((int32_t)env->CP0_PRid);
    }
    if (n >= 73 && n <= 88) {
        /* 16 embedded regs.  */
        GET_REGL(0);
    }

    return 0;
}
/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] =
{
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
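
/* The low two bits of FCR31 select the MIPS rounding mode (0 = nearest,
 * 1 = toward zero, 2 = toward +inf, 3 = toward -inf); ieee_rm[] maps that
 * field directly onto the corresponding softfloat rounding-mode constants
 * used by RESTORE_ROUNDING_MODE above.
 */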
static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmp;

    tmp = ldtul_p(mem_buf);

    if (n < 32) {
        env->active_tc.gpr[n] = tmp;
        return sizeof(target_ulong);
    }
    if (env->CP0_Config1 & (1 << CP0C1_FP)
            && n >= 38 && n < 73) {
        if (n < 70) {
            if (env->CP0_Status & (1 << CP0St_FR))
                env->active_fpu.fpr[n - 38].d = tmp;
            else
                env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
        }
        switch (n) {
        case 70:
            env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
            /* set rounding mode */
            RESTORE_ROUNDING_MODE;
            break;
        case 71: env->active_fpu.fcr0 = tmp; break;
        }
        return sizeof(target_ulong);
    }
    switch (n) {
    case 32: env->CP0_Status = tmp; break;
    case 33: env->active_tc.LO[0] = tmp; break;
    case 34: env->active_tc.HI[0] = tmp; break;
    case 35: env->CP0_BadVAddr = tmp; break;
    case 36: env->CP0_Cause = tmp; break;
    case 37:
        env->active_tc.PC = tmp & ~(target_ulong)1;
        if (tmp & 1) {
            env->hflags |= MIPS_HFLAG_M16;
        } else {
            env->hflags &= ~(MIPS_HFLAG_M16);
        }
        break;
    case 72: /* fp, ignored */
        break;
    default:
        /* Other registers are readonly.  Ignore writes.  */
        break;
    }

    return sizeof(target_ulong);
}
#elif defined(TARGET_OPENRISC)

#define NUM_CORE_REGS (32 + 3)

static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->gpr[n]);
    } else {
        switch (n) {
        case 32:    /* PPC */
            GET_REG32(env->ppc);
        case 33:    /* NPC */
            GET_REG32(env->npc);
        default:
            break;
        }
    }
    return 0;
}

static int cpu_gdb_write_register(CPUOpenRISCState *env,
                                  uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->gpr[n] = tmp;
    }
    return 4;
}
#elif defined (TARGET_SH4)

/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
/* FIXME: We should use XML for this.  */

#define NUM_CORE_REGS 59

static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case 0 ... 7:
        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
            GET_REGL(env->gregs[n + 16]);
        } else {
            GET_REGL(env->gregs[n]);
        }
    case 8 ... 15:
        GET_REGL(env->gregs[n]);
    case 20:
        GET_REGL(env->mach);
    case 21:
        GET_REGL(env->macl);
    case 23:
        GET_REGL(env->fpul);
    case 24:
        GET_REGL(env->fpscr);
    case 25 ... 40:
        if (env->fpscr & FPSCR_FR) {
            stfl_p(mem_buf, env->fregs[n - 9]);
        } else {
            stfl_p(mem_buf, env->fregs[n - 25]);
        }
        return 4;
    case 43 ... 50:
        GET_REGL(env->gregs[n - 43]);
    case 51 ... 58:
        GET_REGL(env->gregs[n - (51 - 16)]);
    }

    return 0;
}

static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case 0 ... 7:
        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
            env->gregs[n + 16] = ldl_p(mem_buf);
        } else {
            env->gregs[n] = ldl_p(mem_buf);
        }
        break;
    case 8 ... 15:
        env->gregs[n] = ldl_p(mem_buf);
        break;
    case 16:
        env->pc = ldl_p(mem_buf);
        break;
    case 17:
        env->pr = ldl_p(mem_buf);
        break;
    case 18:
        env->gbr = ldl_p(mem_buf);
        break;
    case 19:
        env->vbr = ldl_p(mem_buf);
        break;
    case 20:
        env->mach = ldl_p(mem_buf);
        break;
    case 21:
        env->macl = ldl_p(mem_buf);
        break;
    case 22:
        env->sr = ldl_p(mem_buf);
        break;
    case 23:
        env->fpul = ldl_p(mem_buf);
        break;
    case 24:
        env->fpscr = ldl_p(mem_buf);
        break;
    case 25 ... 40:
        if (env->fpscr & FPSCR_FR) {
            env->fregs[n - 9] = ldfl_p(mem_buf);
        } else {
            env->fregs[n - 25] = ldfl_p(mem_buf);
        }
        break;
    case 41:
        env->ssr = ldl_p(mem_buf);
        break;
    case 42:
        env->spc = ldl_p(mem_buf);
        break;
    case 43 ... 50:
        env->gregs[n - 43] = ldl_p(mem_buf);
        break;
    case 51 ... 58:
        env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
        break;
    default:
        return 0;
    }

    return 4;
}
#elif defined (TARGET_MICROBLAZE)

#define NUM_CORE_REGS (32 + 5)

static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->regs[n]);
    } else {
        GET_REG32(env->sregs[n - 32]);
    }
    return 0;
}

static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS)
        return 0;

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->regs[n] = tmp;
    } else {
        env->sregs[n - 32] = tmp;
    }
    return 4;
}
#elif defined (TARGET_CRIS)

#define NUM_CORE_REGS 49

static int
read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    if (n < 15) {
        GET_REG32(env->regs[n]);
    }

    if (n < 32) {
        switch (n) {
        case 16:
            GET_REG8(env->pregs[n - 16]);
            break;
        case 17:
            GET_REG8(env->pregs[n - 16]);
            break;
        case 20:
        case 21:
            GET_REG16(env->pregs[n - 16]);
            break;
        default:
            if (n >= 23) {
                GET_REG32(env->pregs[n - 16]);
            }
            break;
        }
    }
    return 0;
}

static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    uint8_t srs;

    if (env->pregs[PR_VR] < 32)
        return read_register_crisv10(env, mem_buf, n);

    srs = env->pregs[PR_SRS];
    if (n < 16) {
        GET_REG32(env->regs[n]);
    }

    if (n >= 21 && n < 32) {
        GET_REG32(env->pregs[n - 16]);
    }
    if (n >= 33 && n < 49) {
        GET_REG32(env->sregs[srs][n - 33]);
    }
    switch (n) {
    case 16: GET_REG8(env->pregs[0]);
    case 17: GET_REG8(env->pregs[1]);
    case 18: GET_REG32(env->pregs[2]);
    case 19: GET_REG8(srs);
    case 20: GET_REG16(env->pregs[4]);
    case 32: GET_REG32(env->pc);
    }

    return 0;
}

static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    if (n < 16) {
        env->regs[n] = tmp;
    }

    if (n >= 21 && n < 32) {
        env->pregs[n - 16] = tmp;
    }

    /* FIXME: Should the support function regs be writable?  */
    switch (n) {
    case 18: env->pregs[PR_PID] = tmp; break;
    case 32: env->pc = tmp; break;
    }

    return 4;
}
#elif defined (TARGET_ALPHA)

#define NUM_CORE_REGS 67

static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
{
    uint64_t val;
    CPU_DoubleU d;

    switch (n) {
    case 0 ... 30:
        val = env->ir[n];
        break;
    case 32 ... 62:
        d.d = env->fir[n - 32];
        val = d.ll;
        break;
    case 63:
        val = cpu_alpha_load_fpcr(env);
        break;
    case 64:
        val = env->pc;
        break;
    case 31:
    case 65:
        /* 31 really is the zero register; 65 is unassigned in the
           gdb protocol, but is still required to occupy 8 bytes. */
        val = 0;
        break;
    default:
        return 0;
    }
    GET_REGL(val);
}

static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmp = ldtul_p(mem_buf);
    CPU_DoubleU d;

    switch (n) {
    case 0 ... 30:
        env->ir[n] = tmp;
        break;
    case 32 ... 62:
        d.ll = tmp;
        env->fir[n - 32] = d.d;
        break;
    case 63:
        cpu_alpha_store_fpcr(env, tmp);
        break;
    case 64:
        env->pc = tmp;
        break;
    case 31:
    case 65:
        /* 31 really is the zero register; 65 is unassigned in the
           gdb protocol, but is still required to occupy 8 bytes. */
        break;
    default:
        return 0;
    }

    return 8;
}
#elif defined (TARGET_S390X)

#define NUM_CORE_REGS S390_NUM_TOTAL_REGS

static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
    case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
    case S390_A0_REGNUM ... S390_A15_REGNUM:
        GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
    case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
    case S390_F0_REGNUM ... S390_F15_REGNUM:
        GET_REG64(env->fregs[n-S390_F0_REGNUM].ll); break;
    case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
    case S390_CC_REGNUM:
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);
        GET_REG32(env->cc_op);
        break;
    }

    return 0;
}

static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmpl;
    uint32_t tmp32;
    int r = 8;

    tmpl = ldtul_p(mem_buf);
    tmp32 = ldl_p(mem_buf);

    switch (n) {
    case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
    case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        env->regs[n-S390_R0_REGNUM] = tmpl; break;
    case S390_A0_REGNUM ... S390_A15_REGNUM:
        env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
    case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
    case S390_F0_REGNUM ... S390_F15_REGNUM:
        env->fregs[n-S390_F0_REGNUM].ll = tmpl; break;
    case S390_PC_REGNUM: env->psw.addr = tmpl; break;
    case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
    default:
        return 0;
    }

    return r;
}
#elif defined (TARGET_LM32)

#include "hw/lm32_pic.h"
#define NUM_CORE_REGS (32 + 7)

static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->regs[n]);
    } else {
        switch (n) {
        case 32:
            GET_REG32(env->pc);
        /* FIXME: put in right exception ID */
        case 33:
            GET_REG32(0);
        case 34:
            GET_REG32(env->eba);
        case 35:
            GET_REG32(env->deba);
        case 36:
            GET_REG32(env->ie);
        case 37:
            GET_REG32(lm32_pic_get_im(env->pic_state));
        case 38:
            GET_REG32(lm32_pic_get_ip(env->pic_state));
        }
    }
    return 0;
}

static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->regs[n] = tmp;
    } else {
        switch (n) {
        case 37:
            lm32_pic_set_im(env->pic_state, tmp);
            break;
        case 38:
            lm32_pic_set_ip(env->pic_state, tmp);
            break;
        }
    }
    return 4;
}
#elif defined(TARGET_XTENSA)

/* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
 * Use num_regs to see all registers. gdb modification is required for that:
 * reset bit 0 in the 'flags' field of the registers definitions in the
 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
 */
#define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
#define num_g_regs NUM_CORE_REGS

static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
{
    const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;

    if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
        return 0;
    }

    switch (reg->type) {
    case 1: /*ar*/
        xtensa_sync_phys_from_window(env);
        GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);

    case 2: /*SR*/
        GET_REG32(env->sregs[reg->targno & 0xff]);

    case 3: /*UR*/
        GET_REG32(env->uregs[reg->targno & 0xff]);

    case 4: /*f*/
        GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));

    case 8: /*a*/
        GET_REG32(env->regs[reg->targno & 0x0f]);

    default:
        qemu_log("%s from reg %d of unsupported type %d\n",
                 __func__, n, reg->type);
        return 0;
    }
}

static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;
    const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;

    if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    switch (reg->type) {
    case 1: /*ar*/
        env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
        xtensa_sync_window_from_phys(env);
        break;

    case 2: /*SR*/
        env->sregs[reg->targno & 0xff] = tmp;
        break;

    case 3: /*UR*/
        env->uregs[reg->targno & 0xff] = tmp;
        break;

    case 4: /*f*/
        env->fregs[reg->targno & 0x0f] = make_float32(tmp);
        break;

    case 8: /*a*/
        env->regs[reg->targno & 0x0f] = tmp;
        break;

    default:
        qemu_log("%s to reg %d of unsupported type %d\n",
                 __func__, n, reg->type);
        return 0;
    }

    return 4;
}
#else

#define NUM_CORE_REGS 0

static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
{
    return 0;
}

static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
{
    return 0;
}

#endif

#if !defined(TARGET_XTENSA)
static int num_g_regs = NUM_CORE_REGS;
#endif
/* Encode data using the encoding for 'x' packets.  */
static int memtox(char *buf, const char *mem, int len)
{
    char *p = buf;
    char c;

    while (len--) {
        c = *(mem++);
        switch (c) {
        case '#': case '$': case '*': case '}':
            *(p++) = '}';
            *(p++) = c ^ 0x20;
            break;
        default:
            *(p++) = c;
            break;
        }
    }
    return p - buf;
}
static const char *get_feature_xml(const char *p, const char **newp)
{
    size_t len;
    int i;
    const char *name;
    static char target_xml[1024];

    len = 0;
    while (p[len] && p[len] != ':')
        len++;
    *newp = p + len;

    name = NULL;
    if (strncmp(p, "target.xml", len) == 0) {
        /* Generate the XML description for this CPU.  */
        if (!target_xml[0]) {
            GDBRegisterState *r;

            snprintf(target_xml, sizeof(target_xml),
                     "<?xml version=\"1.0\"?>"
                     "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
                     "<target>"
                     "<xi:include href=\"%s\"/>",
                     GDB_CORE_XML);

            for (r = first_cpu->gdb_regs; r; r = r->next) {
                pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
                pstrcat(target_xml, sizeof(target_xml), r->xml);
                pstrcat(target_xml, sizeof(target_xml), "\"/>");
            }
            pstrcat(target_xml, sizeof(target_xml), "</target>");
        }
        return target_xml;
    }
    for (i = 0; ; i++) {
        name = xml_builtin[i][0];
        if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
            break;
    }
    return name ? xml_builtin[i][1] : NULL;
}
static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
{
    GDBRegisterState *r;

    if (reg < NUM_CORE_REGS)
        return cpu_gdb_read_register(env, mem_buf, reg);

    for (r = env->gdb_regs; r; r = r->next) {
        if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
            return r->get_reg(env, mem_buf, reg - r->base_reg);
        }
    }
    return 0;
}

static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
{
    GDBRegisterState *r;

    if (reg < NUM_CORE_REGS)
        return cpu_gdb_write_register(env, mem_buf, reg);

    for (r = env->gdb_regs; r; r = r->next) {
        if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
            return r->set_reg(env, mem_buf, reg - r->base_reg);
        }
    }
    return 0;
}
#if !defined(TARGET_XTENSA)
/* Register a supplemental set of CPU registers.  If g_pos is nonzero it
   specifies the first register number and these registers are included in
   a standard "g" packet.  Direction is relative to gdb, i.e. get_reg is
   gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
 */

void gdb_register_coprocessor(CPUArchState * env,
                              gdb_reg_cb get_reg, gdb_reg_cb set_reg,
                              int num_regs, const char *xml, int g_pos)
{
    GDBRegisterState *s;
    GDBRegisterState **p;
    static int last_reg = NUM_CORE_REGS;

    p = &env->gdb_regs;
    while (*p) {
        /* Check for duplicates.  */
        if (strcmp((*p)->xml, xml) == 0)
            return;
        p = &(*p)->next;
    }

    s = g_new0(GDBRegisterState, 1);
    s->base_reg = last_reg;
    s->num_regs = num_regs;
    s->get_reg = get_reg;
    s->set_reg = set_reg;
    s->xml = xml;

    /* Add to end of list.  */
    last_reg += num_regs;
    *p = s;
    if (g_pos) {
        if (g_pos != s->base_reg) {
            fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
                    "Expected %d got %d\n", xml, g_pos, s->base_reg);
        } else {
            num_g_regs = last_reg;
        }
    }
}
#endif
#ifndef CONFIG_USER_ONLY
static const int xlat_gdb_type[] = {
    [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
    [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
    [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
};
#endif
static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
{
    CPUArchState *env;
    int err = 0;

    if (kvm_enabled())
        return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
            if (err)
                break;
        }
        return err;
#ifndef CONFIG_USER_ONLY
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
                                        NULL);
            if (err)
                break;
        }
        return err;
#endif
    default:
        return -ENOSYS;
    }
}

static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
{
    CPUArchState *env;
    int err = 0;

    if (kvm_enabled())
        return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_breakpoint_remove(env, addr, BP_GDB);
            if (err)
                break;
        }
        return err;
#ifndef CONFIG_USER_ONLY
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
            if (err)
                break;
        }
        return err;
#endif
    default:
        return -ENOSYS;
    }
}

static void gdb_breakpoint_remove_all(void)
{
    CPUArchState *env;

    if (kvm_enabled()) {
        kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
        return;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_breakpoint_remove_all(env, BP_GDB);
#ifndef CONFIG_USER_ONLY
        cpu_watchpoint_remove_all(env, BP_GDB);
#endif
    }
}
static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
{
    cpu_synchronize_state(s->c_cpu);
#if defined(TARGET_I386)
    s->c_cpu->eip = pc;
#elif defined (TARGET_PPC)
    s->c_cpu->nip = pc;
#elif defined (TARGET_SPARC)
    s->c_cpu->pc = pc;
    s->c_cpu->npc = pc + 4;
#elif defined (TARGET_ARM)
    s->c_cpu->regs[15] = pc;
#elif defined (TARGET_SH4)
    s->c_cpu->pc = pc;
#elif defined (TARGET_MIPS)
    s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
    if (pc & 1) {
        s->c_cpu->hflags |= MIPS_HFLAG_M16;
    } else {
        s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
    }
#elif defined (TARGET_MICROBLAZE)
    s->c_cpu->sregs[SR_PC] = pc;
#elif defined(TARGET_OPENRISC)
    s->c_cpu->pc = pc;
#elif defined (TARGET_CRIS)
    s->c_cpu->pc = pc;
#elif defined (TARGET_ALPHA)
    s->c_cpu->pc = pc;
#elif defined (TARGET_S390X)
    s->c_cpu->psw.addr = pc;
#elif defined (TARGET_LM32)
    s->c_cpu->pc = pc;
#elif defined(TARGET_XTENSA)
    s->c_cpu->pc = pc;
#endif
}
static CPUArchState *find_cpu(uint32_t thread_id)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (cpu_index(env) == thread_id) {
            return env;
        }
    }

    return NULL;
}
static int gdb_handle_packet(GDBState *s, const char *line_buf)
{
    CPUArchState *env;
    const char *p;
    uint32_t thread;
    int ch, reg_size, type, res;
    char buf[MAX_PACKET_LENGTH];
    uint8_t mem_buf[MAX_PACKET_LENGTH];
    uint8_t *registers;
    target_ulong addr, len;

#ifdef DEBUG_GDB
    printf("command='%s'\n", line_buf);
#endif
    p = line_buf;
    ch = *p++;
    switch(ch) {
    case '?':
        /* TODO: Make this return the correct value for user-mode.  */
        snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
                 cpu_index(s->c_cpu));
        put_packet(s, buf);
        /* Remove all the breakpoints when this query is issued,
         * because gdb is doing an initial connect and the state
         * should be cleaned up.
         */
        gdb_breakpoint_remove_all();
        break;
    case 'c':
        if (*p != '\0') {
            addr = strtoull(p, (char **)&p, 16);
            gdb_set_cpu_pc(s, addr);
        }
        s->signal = 0;
        gdb_continue(s);
        return RS_IDLE;
    case 'C':
        s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
        if (s->signal == -1)
            s->signal = 0;
        gdb_continue(s);
        return RS_IDLE;
    case 'v':
        if (strncmp(p, "Cont", 4) == 0) {
            int res_signal, res_thread;

            p += 4;
            if (*p == '?') {
                put_packet(s, "vCont;c;C;s;S");
                break;
            }
            res = 0;
            res_signal = 0;
            res_thread = 0;
            while (*p) {
                int action, signal;

                if (*p++ != ';') {
                    res = 0;
                    break;
                }
                action = *p++;
                signal = 0;
                if (action == 'C' || action == 'S') {
                    signal = strtoul(p, (char **)&p, 16);
                } else if (action != 'c' && action != 's') {
                    res = 0;
                    break;
                }
                thread = 0;
                if (*p == ':') {
                    thread = strtoull(p+1, (char **)&p, 16);
                }
                action = tolower(action);
                if (res == 0 || (res == 'c' && action == 's')) {
                    res = action;
                    res_signal = signal;
                    res_thread = thread;
                }
            }
            if (res) {
                if (res_thread != -1 && res_thread != 0) {
                    env = find_cpu(res_thread);
                    if (env == NULL) {
                        put_packet(s, "E22");
                        break;
                    }
                    s->c_cpu = env;
                }
                if (res == 's') {
                    cpu_single_step(s->c_cpu, sstep_flags);
                }
                s->signal = res_signal;
                gdb_continue(s);
                return RS_IDLE;
            }
            break;
        } else {
            goto unknown_command;
        }
    case 'k':
#ifdef CONFIG_USER_ONLY
        /* Kill the target */
        fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
        exit(0);
#endif
    case 'D':
        /* Detach packet */
        gdb_breakpoint_remove_all();
        gdb_syscall_mode = GDB_SYS_DISABLED;
        gdb_continue(s);
        put_packet(s, "OK");
        break;
    case 's':
        if (*p != '\0') {
            addr = strtoull(p, (char **)&p, 16);
            gdb_set_cpu_pc(s, addr);
        }
        cpu_single_step(s->c_cpu, sstep_flags);
        gdb_continue(s);
        return RS_IDLE;
    case 'F':
        {
            target_ulong ret;
            target_ulong err;

            ret = strtoull(p, (char **)&p, 16);
            if (*p == ',') {
                p++;
                err = strtoull(p, (char **)&p, 16);
            } else {
                err = 0;
            }
            if (*p == ',')
                p++;
            type = *p;
            if (s->current_syscall_cb) {
                s->current_syscall_cb(s->c_cpu, ret, err);
                s->current_syscall_cb = NULL;
            }
            if (type == 'C') {
                put_packet(s, "T02");
            } else {
                gdb_continue(s);
            }
        }
        break;
    case 'g':
        cpu_synchronize_state(s->g_cpu);
        len = 0;
        for (addr = 0; addr < num_g_regs; addr++) {
            reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
            len += reg_size;
        }
        memtohex(buf, mem_buf, len);
        put_packet(s, buf);
        break;
    case 'G':
        cpu_synchronize_state(s->g_cpu);
        registers = mem_buf;
        len = strlen(p) / 2;
        hextomem((uint8_t *)registers, p, len);
        for (addr = 0; addr < num_g_regs && len > 0; addr++) {
            reg_size = gdb_write_register(s->g_cpu, registers, addr);
            len -= reg_size;
            registers += reg_size;
        }
        put_packet(s, "OK");
        break;
    case 'm':
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, NULL, 16);
        if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
            put_packet (s, "E14");
        } else {
            memtohex(buf, mem_buf, len);
            put_packet(s, buf);
        }
        break;
    case 'M':
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, (char **)&p, 16);
        if (*p == ':')
            p++;
        hextomem(mem_buf, p, len);
        if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
            put_packet(s, "E14");
        } else {
            put_packet(s, "OK");
        }
        break;
    case 'p':
        /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
           This works, but can be very slow.  Anything new enough to
           understand XML also knows how to use this properly.  */
        if (!gdb_has_xml)
            goto unknown_command;
        addr = strtoull(p, (char **)&p, 16);
        reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
        if (reg_size) {
            memtohex(buf, mem_buf, reg_size);
            put_packet(s, buf);
        } else {
            put_packet(s, "E14");
        }
        break;
    case 'P':
        if (!gdb_has_xml)
            goto unknown_command;
        addr = strtoull(p, (char **)&p, 16);
        if (*p == '=')
            p++;
        reg_size = strlen(p) / 2;
        hextomem(mem_buf, p, reg_size);
        gdb_write_register(s->g_cpu, mem_buf, addr);
        put_packet(s, "OK");
        break;
    case 'Z':
    case 'z':
        type = strtoul(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, (char **)&p, 16);
        if (ch == 'Z')
            res = gdb_breakpoint_insert(addr, len, type);
        else
            res = gdb_breakpoint_remove(addr, len, type);
        if (res >= 0)
             put_packet(s, "OK");
        else if (res == -ENOSYS)
            put_packet(s, "");
        else
            put_packet(s, "E22");
        break;
    case 'H':
        type = *p++;
        thread = strtoull(p, (char **)&p, 16);
        if (thread == -1 || thread == 0) {
            put_packet(s, "OK");
            break;
        }
        env = find_cpu(thread);
        if (env == NULL) {
            put_packet(s, "E22");
            break;
        }
        switch (type) {
        case 'c':
            s->c_cpu = env;
            put_packet(s, "OK");
            break;
        case 'g':
            s->g_cpu = env;
            put_packet(s, "OK");
            break;
        default:
            put_packet(s, "E22");
            break;
        }
        break;
    case 'T':
        thread = strtoull(p, (char **)&p, 16);
        env = find_cpu(thread);

        if (env != NULL) {
            put_packet(s, "OK");
        } else {
            put_packet(s, "E22");
        }
        break;
    case 'q':
    case 'Q':
        /* parse any 'q' packets here */
        if (!strcmp(p,"qemu.sstepbits")) {
            /* Query Breakpoint bit definitions */
            snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
                     SSTEP_ENABLE,
                     SSTEP_NOIRQ,
                     SSTEP_NOTIMER);
            put_packet(s, buf);
            break;
        } else if (strncmp(p,"qemu.sstep",10) == 0) {
            /* Display or change the sstep_flags */
            p += 10;
            if (*p != '=') {
                /* Display current setting */
                snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
                put_packet(s, buf);
                break;
            }
            p++;
            type = strtoul(p, (char **)&p, 16);
            sstep_flags = type;
            put_packet(s, "OK");
            break;
        } else if (strcmp(p,"C") == 0) {
            /* "Current thread" remains vague in the spec, so always return
             * the first CPU (gdb returns the first thread). */
            put_packet(s, "QC1");
            break;
        } else if (strcmp(p,"fThreadInfo") == 0) {
            s->query_cpu = first_cpu;
            goto report_cpuinfo;
        } else if (strcmp(p,"sThreadInfo") == 0) {
        report_cpuinfo:
            if (s->query_cpu) {
                snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
                put_packet(s, buf);
                s->query_cpu = s->query_cpu->next_cpu;
            } else
                put_packet(s, "l");
            break;
        } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
            thread = strtoull(p+16, (char **)&p, 16);
            env = find_cpu(thread);
            if (env != NULL) {
                cpu_synchronize_state(env);
                len = snprintf((char *)mem_buf, sizeof(mem_buf),
                               "CPU#%d [%s]", env->cpu_index,
                               env->halted ? "halted " : "running");
                memtohex(buf, mem_buf, len);
                put_packet(s, buf);
            }
            break;
        }
#ifdef CONFIG_USER_ONLY
        else if (strncmp(p, "Offsets", 7) == 0) {
            TaskState *ts = s->c_cpu->opaque;

            snprintf(buf, sizeof(buf),
                     "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
                     ";Bss=" TARGET_ABI_FMT_lx,
                     ts->info->code_offset,
                     ts->info->data_offset,
                     ts->info->data_offset);
            put_packet(s, buf);
            break;
        }
#else /* !CONFIG_USER_ONLY */
        else if (strncmp(p, "Rcmd,", 5) == 0) {
            int len = strlen(p + 5);

            if ((len % 2) != 0) {
                put_packet(s, "E01");
                break;
            }
            hextomem(mem_buf, p + 5, len);
            len = len / 2;
            mem_buf[len++] = 0;
            qemu_chr_be_write(s->mon_chr, mem_buf, len);
            put_packet(s, "OK");
            break;
        }
#endif /* !CONFIG_USER_ONLY */
, "Supported", 9) == 0) {
2425 snprintf(buf
, sizeof(buf
), "PacketSize=%x", MAX_PACKET_LENGTH
);
2427 pstrcat(buf
, sizeof(buf
), ";qXfer:features:read+");
2433 if (strncmp(p
, "Xfer:features:read:", 19) == 0) {
2435 target_ulong total_len
;
2439 xml
= get_feature_xml(p
, &p
);
2441 snprintf(buf
, sizeof(buf
), "E00");
2448 addr
= strtoul(p
, (char **)&p
, 16);
2451 len
= strtoul(p
, (char **)&p
, 16);
2453 total_len
= strlen(xml
);
2454 if (addr
> total_len
) {
2455 snprintf(buf
, sizeof(buf
), "E00");
2459 if (len
> (MAX_PACKET_LENGTH
- 5) / 2)
2460 len
= (MAX_PACKET_LENGTH
- 5) / 2;
2461 if (len
< total_len
- addr
) {
2463 len
= memtox(buf
+ 1, xml
+ addr
, len
);
2466 len
= memtox(buf
+ 1, xml
+ addr
, total_len
- addr
);
2468 put_packet_binary(s
, buf
, len
+ 1);
2472 /* Unrecognised 'q' command. */
2473 goto unknown_command
;
2477 /* put empty packet */
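
/* For reference, the XML handshake above is driven by packets of the form
 * "qXfer:features:read:target.xml:0,ffb", i.e. <annex>:<offset>,<length> in
 * hex.  Replies are prefixed with 'm' while more data remains and 'l' for
 * the final chunk, which is how gdb pages through descriptions larger than
 * a single packet.
 */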
void gdb_set_stop_cpu(CPUArchState *env)
{
    gdbserver_state->c_cpu = env;
    gdbserver_state->g_cpu = env;
}
#ifndef CONFIG_USER_ONLY
static void gdb_vm_state_change(void *opaque, int running, RunState state)
{
    GDBState *s = gdbserver_state;
    CPUArchState *env = s->c_cpu;
    char buf[256];
    const char *type;
    int ret;

    if (running || s->state == RS_INACTIVE) {
        return;
    }
    /* Is there a GDB syscall waiting to be sent?  */
    if (s->current_syscall_cb) {
        put_packet(s, s->syscall_buf);
        return;
    }
    switch (state) {
    case RUN_STATE_DEBUG:
        if (env->watchpoint_hit) {
            switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
            case BP_MEM_READ:
                type = "r";
                break;
            case BP_MEM_ACCESS:
                type = "a";
                break;
            default:
                type = "";
                break;
            }
            snprintf(buf, sizeof(buf),
                     "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
                     GDB_SIGNAL_TRAP, cpu_index(env), type,
                     env->watchpoint_hit->vaddr);
            env->watchpoint_hit = NULL;
            goto send_packet;
        }
        ret = GDB_SIGNAL_TRAP;
        break;
    case RUN_STATE_PAUSED:
        ret = GDB_SIGNAL_INT;
        break;
    case RUN_STATE_SHUTDOWN:
        ret = GDB_SIGNAL_QUIT;
        break;
    case RUN_STATE_IO_ERROR:
        ret = GDB_SIGNAL_IO;
        break;
    case RUN_STATE_WATCHDOG:
        ret = GDB_SIGNAL_ALRM;
        break;
    case RUN_STATE_INTERNAL_ERROR:
        ret = GDB_SIGNAL_ABRT;
        break;
    case RUN_STATE_SAVE_VM:
    case RUN_STATE_RESTORE_VM:
        return;
    case RUN_STATE_FINISH_MIGRATE:
        ret = GDB_SIGNAL_XCPU;
        break;
    default:
        ret = GDB_SIGNAL_UNKNOWN;
        break;
    }
    snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));

send_packet:
    put_packet(s, buf);

    /* disable single step if it was enabled */
    cpu_single_step(env, 0);
}
#endif
/* Send a gdb syscall request.
   This accepts limited printf-style format specifiers, specifically:
    %x  - target_ulong argument printed in hex.
    %lx - 64-bit argument printed in hex.
    %s  - string pointer (target_ulong) and length (int) pair.  */
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
{
    va_list va;
    char *p;
    char *p_end;
    target_ulong addr;
    uint64_t i64;
    GDBState *s;

    s = gdbserver_state;
    if (!s)
        return;
    s->current_syscall_cb = cb;
#ifndef CONFIG_USER_ONLY
    vm_stop(RUN_STATE_DEBUG);
#endif
    va_start(va, fmt);
    p = s->syscall_buf;
    *(p++) = 'F';
    p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
    while (*fmt) {
        if (*fmt == '%') {
            fmt++;
            switch (*fmt++) {
            case 'x':
                addr = va_arg(va, target_ulong);
                p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
                break;
            case 'l':
                if (*(fmt++) != 'x')
                    goto bad_format;
                i64 = va_arg(va, uint64_t);
                p += snprintf(p, p_end - p, "%" PRIx64, i64);
                break;
            case 's':
                addr = va_arg(va, target_ulong);
                p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
                              addr, va_arg(va, int));
                break;
            default:
            bad_format:
                fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
                        fmt - 1);
                break;
            }
        } else {
            *(p++) = *(fmt++);
        }
    }
    *p = 0;
    va_end(va);
#ifdef CONFIG_USER_ONLY
    put_packet(s, s->syscall_buf);
    gdb_handlesig(s->c_cpu, 0);
#else
    /* In this case wait to send the syscall packet until notification that
       the CPU has stopped.  This must be done because if the packet is sent
       now the reply from the syscall request could be received while the CPU
       is still in the running state, which can cause packets to be dropped
       and state transition 'T' packets to be sent while the syscall is still
       being processed.  */
#endif
}
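
/* Hypothetical usage (the names are illustrative, not the actual callers in
 * the semihosting code): a semihosted write could be queued as
 *
 *     gdb_do_syscall(my_complete_cb, "write,%x,%x,%x", fd, buf_addr, count);
 *
 * which formats an 'F' file-I/O request (e.g. "Fwrite,..." with the
 * arguments in hex); gdb later answers with an 'F' reply packet that is
 * handled by the 'F' case of gdb_handle_packet() and invokes my_complete_cb.
 */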
static void gdb_read_byte(GDBState *s, int ch)
{
    int i, csum;
    uint8_t reply;

#ifndef CONFIG_USER_ONLY
    if (s->last_packet_len) {
        /* Waiting for a response to the last packet.  If we see the start
           of a new command then abandon the previous response.  */
        if (ch == '-') {
#ifdef DEBUG_GDB
            printf("Got NACK, retransmitting\n");
#endif
            put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
        }
#ifdef DEBUG_GDB
        else if (ch == '+')
            printf("Got ACK\n");
        else
            printf("Got '%c' when expecting ACK/NACK\n", ch);
#endif
        if (ch == '+' || ch == '$')
            s->last_packet_len = 0;
        if (ch != '$')
            return;
    }
    if (runstate_is_running()) {
        /* when the CPU is running, we cannot do anything except stop
           it when receiving a char */
        vm_stop(RUN_STATE_PAUSED);
    } else
#endif
    {
        switch(s->state) {
        case RS_IDLE:
            if (ch == '$') {
                s->line_buf_index = 0;
                s->state = RS_GETLINE;
            }
            break;
        case RS_GETLINE:
            if (ch == '#') {
                s->state = RS_CHKSUM1;
            } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
                s->state = RS_IDLE;
            } else {
                s->line_buf[s->line_buf_index++] = ch;
            }
            break;
        case RS_CHKSUM1:
            s->line_buf[s->line_buf_index] = '\0';
            s->line_csum = fromhex(ch) << 4;
            s->state = RS_CHKSUM2;
            break;
        case RS_CHKSUM2:
            s->line_csum |= fromhex(ch);
            csum = 0;
            for(i = 0; i < s->line_buf_index; i++) {
                csum += s->line_buf[i];
            }
            if (s->line_csum != (csum & 0xff)) {
                reply = '-';
                put_buffer(s, &reply, 1);
                s->state = RS_IDLE;
            } else {
                reply = '+';
                put_buffer(s, &reply, 1);
                s->state = gdb_handle_packet(s, s->line_buf);
            }
            break;
        default:
            abort();
        }
    }
}
/* Tell the remote gdb that the process has exited.  */
void gdb_exit(CPUArchState *env, int code)
{
    GDBState *s;
    char buf[4];

    s = gdbserver_state;
    if (!s) {
        return;
    }
#ifdef CONFIG_USER_ONLY
    if (gdbserver_fd < 0 || s->fd < 0) {
        return;
    }
#endif

    snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
    put_packet(s, buf);

#ifndef CONFIG_USER_ONLY
    if (s->chr) {
        qemu_chr_delete(s->chr);
    }
#endif
}
#ifdef CONFIG_USER_ONLY
int
gdb_queuesig (void)
{
    GDBState *s;

    s = gdbserver_state;

    if (gdbserver_fd < 0 || s->fd < 0)
        return 0;
    else
        return 1;
}
int
gdb_handlesig (CPUArchState *env, int sig)
{
    GDBState *s;
    char buf[256];
    int n;

    s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return sig;

    /* disable single step if it was enabled */
    cpu_single_step(env, 0);

    if (sig != 0) {
        snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
        put_packet(s, buf);
    }
    /* put_packet() might have detected that the peer terminated the
       connection.  */
    if (s->fd < 0)
        return sig;

    sig = 0;
    s->state = RS_IDLE;
    s->running_state = 0;
    while (s->running_state == 0) {
        n = read (s->fd, buf, 256);
        if (n > 0) {
            int i;

            for (i = 0; i < n; i++)
                gdb_read_byte (s, buf[i]);
        } else if (n == 0 || errno != EAGAIN) {
            /* XXX: Connection closed.  Should probably wait for another
               connection before continuing.  */
            return sig;
        }
    }
    sig = s->signal;
    s->signal = 0;
    return sig;
}
/* Tell the remote gdb that the process has exited due to SIG.  */
void gdb_signalled(CPUArchState *env, int sig)
{
    GDBState *s;
    char buf[4];

    s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return;

    snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
    put_packet(s, buf);
}
static void gdb_accept(void)
{
    GDBState *s;
    struct sockaddr_in sockaddr;
    socklen_t len;
    int val, fd;

    for(;;) {
        len = sizeof(sockaddr);
        fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
        if (fd < 0 && errno != EINTR) {
            perror("accept");
            return;
        } else if (fd >= 0) {
#ifndef _WIN32
            fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif
            break;
        }
    }

    /* set short latency */
    val = 1;
    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));

    s = g_malloc0(sizeof(GDBState));
    s->c_cpu = first_cpu;
    s->g_cpu = first_cpu;
    s->fd = fd;
    gdb_has_xml = 0;

    gdbserver_state = s;

    fcntl(fd, F_SETFL, O_NONBLOCK);
}
static int gdbserver_open(int port)
{
    struct sockaddr_in sockaddr;
    int fd, val, ret;

    fd = socket(PF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("socket");
        return -1;
    }
#ifndef _WIN32
    fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif

    /* allow fast reuse */
    val = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));

    sockaddr.sin_family = AF_INET;
    sockaddr.sin_port = htons(port);
    sockaddr.sin_addr.s_addr = 0;
    ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
    if (ret < 0) {
        perror("bind");
        close(fd);
        return -1;
    }
    ret = listen(fd, 0);
    if (ret < 0) {
        perror("listen");
        close(fd);
        return -1;
    }
    return fd;
}
int gdbserver_start(int port)
{
    gdbserver_fd = gdbserver_open(port);
    if (gdbserver_fd < 0)
        return -1;
    /* accept connections */
    gdb_accept();
    return 0;
}
/* Disable gdb stub for child processes.  */
void gdbserver_fork(CPUArchState *env)
{
    GDBState *s = gdbserver_state;

    if (gdbserver_fd < 0 || s->fd < 0)
        return;
    close(s->fd);
    s->fd = -1;
    cpu_breakpoint_remove_all(env, BP_GDB);
    cpu_watchpoint_remove_all(env, BP_GDB);
}
#else
static int gdb_chr_can_receive(void *opaque)
{
    /* We can handle an arbitrarily large amount of data.
       Pick the maximum packet size, which is as good as anything.  */
    return MAX_PACKET_LENGTH;
}

static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
{
    int i;

    for (i = 0; i < size; i++) {
        gdb_read_byte(gdbserver_state, buf[i]);
    }
}
static void gdb_chr_event(void *opaque, int event)
{
    switch (event) {
    case CHR_EVENT_OPENED:
        vm_stop(RUN_STATE_PAUSED);
        gdb_has_xml = 0;
        break;
    default:
        break;
    }
}
static void gdb_monitor_output(GDBState *s, const char *msg, int len)
{
    char buf[MAX_PACKET_LENGTH];

    buf[0] = 'O';
    if (len > (MAX_PACKET_LENGTH/2) - 1)
        len = (MAX_PACKET_LENGTH/2) - 1;
    memtohex(buf + 1, (uint8_t *)msg, len);
    put_packet(s, buf);
}
static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
{
    const char *p = (const char *)buf;
    int max_sz;

    max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
    for (;;) {
        if (len <= max_sz) {
            gdb_monitor_output(gdbserver_state, p, len);
            break;
        }
        gdb_monitor_output(gdbserver_state, p, max_sz);
        p += max_sz;
        len -= max_sz;
    }
    return len;
}
#ifndef _WIN32
static void gdb_sigterm_handler(int signal)
{
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_PAUSED);
    }
}
#endif
int gdbserver_start(const char *device)
{
    GDBState *s;
    char gdbstub_device_name[128];
    CharDriverState *chr = NULL;
    CharDriverState *mon_chr;

    if (!device)
        return -1;
    if (strcmp(device, "none") != 0) {
        if (strstart(device, "tcp:", NULL)) {
            /* enforce required TCP attributes */
            snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
                     "%s,nowait,nodelay,server", device);
            device = gdbstub_device_name;
        }
#ifndef _WIN32
        else if (strcmp(device, "stdio") == 0) {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = gdb_sigterm_handler;
            sigaction(SIGINT, &act, NULL);
        }
#endif
        chr = qemu_chr_new("gdb", device, NULL);
        if (!chr)
            return -1;

        qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
                              gdb_chr_event, NULL);
    }

    s = gdbserver_state;
    if (!s) {
        s = g_malloc0(sizeof(GDBState));
        gdbserver_state = s;

        qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);

        /* Initialize a monitor terminal for gdb */
        mon_chr = g_malloc0(sizeof(*mon_chr));
        mon_chr->chr_write = gdb_monitor_write;
        monitor_init(mon_chr, 0);
    } else {
        if (s->chr)
            qemu_chr_delete(s->chr);
        mon_chr = s->mon_chr;
        memset(s, 0, sizeof(GDBState));
    }
    s->c_cpu = first_cpu;
    s->g_cpu = first_cpu;
    s->chr = chr;
    s->state = chr ? RS_IDLE : RS_INACTIVE;
    s->mon_chr = mon_chr;
    s->current_syscall_cb = NULL;

    return 0;
}