/*
 * gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "qemu-common.h"
#ifdef CONFIG_USER_ONLY
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "qemu.h"
#else
#include "monitor/monitor.h"
#include "char/char.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#endif

#define MAX_PACKET_LENGTH 4096

#include "cpu.h"
#include "qemu/sockets.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"

#ifndef TARGET_CPU_MEMORY_RW_DEBUG
static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
                                         uint8_t *buf, int len, int is_write)
{
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
#else
/* target_memory_rw_debug() defined in cpu.h */
#endif
enum {
    GDB_SIGNAL_0 = 0,
    GDB_SIGNAL_INT = 2,
    GDB_SIGNAL_QUIT = 3,
    GDB_SIGNAL_TRAP = 5,
    GDB_SIGNAL_ABRT = 6,
    GDB_SIGNAL_ALRM = 14,
    GDB_SIGNAL_IO = 23,
    GDB_SIGNAL_XCPU = 24,
    GDB_SIGNAL_UNKNOWN = 143
};
67 #ifdef CONFIG_USER_ONLY
69 /* Map target signal numbers to GDB protocol signal numbers and vice
70 * versa. For user emulation's currently supported systems, we can
71 * assume most signals are defined.
74 static int gdb_signal_table[] = {
76 TARGET_SIGHUP,
77 TARGET_SIGINT,
78 TARGET_SIGQUIT,
79 TARGET_SIGILL,
80 TARGET_SIGTRAP,
81 TARGET_SIGABRT,
82 -1, /* SIGEMT */
83 TARGET_SIGFPE,
84 TARGET_SIGKILL,
85 TARGET_SIGBUS,
86 TARGET_SIGSEGV,
87 TARGET_SIGSYS,
88 TARGET_SIGPIPE,
89 TARGET_SIGALRM,
90 TARGET_SIGTERM,
91 TARGET_SIGURG,
92 TARGET_SIGSTOP,
93 TARGET_SIGTSTP,
94 TARGET_SIGCONT,
95 TARGET_SIGCHLD,
96 TARGET_SIGTTIN,
97 TARGET_SIGTTOU,
98 TARGET_SIGIO,
99 TARGET_SIGXCPU,
100 TARGET_SIGXFSZ,
101 TARGET_SIGVTALRM,
102 TARGET_SIGPROF,
103 TARGET_SIGWINCH,
104 -1, /* SIGLOST */
105 TARGET_SIGUSR1,
106 TARGET_SIGUSR2,
107 #ifdef TARGET_SIGPWR
108 TARGET_SIGPWR,
109 #else
111 #endif
112 -1, /* SIGPOLL */
124 #ifdef __SIGRTMIN
125 __SIGRTMIN + 1,
126 __SIGRTMIN + 2,
127 __SIGRTMIN + 3,
128 __SIGRTMIN + 4,
129 __SIGRTMIN + 5,
130 __SIGRTMIN + 6,
131 __SIGRTMIN + 7,
132 __SIGRTMIN + 8,
133 __SIGRTMIN + 9,
134 __SIGRTMIN + 10,
135 __SIGRTMIN + 11,
136 __SIGRTMIN + 12,
137 __SIGRTMIN + 13,
138 __SIGRTMIN + 14,
139 __SIGRTMIN + 15,
140 __SIGRTMIN + 16,
141 __SIGRTMIN + 17,
142 __SIGRTMIN + 18,
143 __SIGRTMIN + 19,
144 __SIGRTMIN + 20,
145 __SIGRTMIN + 21,
146 __SIGRTMIN + 22,
147 __SIGRTMIN + 23,
148 __SIGRTMIN + 24,
149 __SIGRTMIN + 25,
150 __SIGRTMIN + 26,
151 __SIGRTMIN + 27,
152 __SIGRTMIN + 28,
153 __SIGRTMIN + 29,
154 __SIGRTMIN + 30,
155 __SIGRTMIN + 31,
156 -1, /* SIGCANCEL */
157 __SIGRTMIN,
158 __SIGRTMIN + 32,
159 __SIGRTMIN + 33,
160 __SIGRTMIN + 34,
161 __SIGRTMIN + 35,
162 __SIGRTMIN + 36,
163 __SIGRTMIN + 37,
164 __SIGRTMIN + 38,
165 __SIGRTMIN + 39,
166 __SIGRTMIN + 40,
167 __SIGRTMIN + 41,
168 __SIGRTMIN + 42,
169 __SIGRTMIN + 43,
170 __SIGRTMIN + 44,
171 __SIGRTMIN + 45,
172 __SIGRTMIN + 46,
173 __SIGRTMIN + 47,
174 __SIGRTMIN + 48,
175 __SIGRTMIN + 49,
176 __SIGRTMIN + 50,
177 __SIGRTMIN + 51,
178 __SIGRTMIN + 52,
179 __SIGRTMIN + 53,
180 __SIGRTMIN + 54,
181 __SIGRTMIN + 55,
182 __SIGRTMIN + 56,
183 __SIGRTMIN + 57,
184 __SIGRTMIN + 58,
185 __SIGRTMIN + 59,
186 __SIGRTMIN + 60,
187 __SIGRTMIN + 61,
188 __SIGRTMIN + 62,
189 __SIGRTMIN + 63,
190 __SIGRTMIN + 64,
191 __SIGRTMIN + 65,
192 __SIGRTMIN + 66,
193 __SIGRTMIN + 67,
194 __SIGRTMIN + 68,
195 __SIGRTMIN + 69,
196 __SIGRTMIN + 70,
197 __SIGRTMIN + 71,
198 __SIGRTMIN + 72,
199 __SIGRTMIN + 73,
200 __SIGRTMIN + 74,
201 __SIGRTMIN + 75,
202 __SIGRTMIN + 76,
203 __SIGRTMIN + 77,
204 __SIGRTMIN + 78,
205 __SIGRTMIN + 79,
206 __SIGRTMIN + 80,
207 __SIGRTMIN + 81,
208 __SIGRTMIN + 82,
209 __SIGRTMIN + 83,
210 __SIGRTMIN + 84,
211 __SIGRTMIN + 85,
212 __SIGRTMIN + 86,
213 __SIGRTMIN + 87,
214 __SIGRTMIN + 88,
215 __SIGRTMIN + 89,
216 __SIGRTMIN + 90,
217 __SIGRTMIN + 91,
218 __SIGRTMIN + 92,
219 __SIGRTMIN + 93,
220 __SIGRTMIN + 94,
221 __SIGRTMIN + 95,
222 -1, /* SIGINFO */
223 -1, /* UNKNOWN */
224 -1, /* DEFAULT */
231 #endif
#else
/* In system mode we only need SIGINT and SIGTRAP; other signals
   are not yet supported.  */

enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

static int gdb_signal_table[] = {
    -1,
    -1,
    TARGET_SIGINT,
    -1,
    -1,
    TARGET_SIGTRAP
};
#endif

#ifdef CONFIG_USER_ONLY
static int target_signal_to_gdb (int sig)
{
    int i;
    for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
        if (gdb_signal_table[i] == sig)
            return i;
    return GDB_SIGNAL_UNKNOWN;
}
#endif

static int gdb_signal_to_target (int sig)
{
    if (sig < ARRAY_SIZE (gdb_signal_table))
        return gdb_signal_table[sig];
    else
        return -1;
}
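
/*
 * Worked example of the tables above (a sketch, not code that is called
 * anywhere): with the user-mode table, TARGET_SIGTRAP sits at index 5, so
 * target_signal_to_gdb(TARGET_SIGTRAP) returns 5 == GDB_SIGNAL_TRAP, and a
 * gdb "C05" (continue with signal 5) packet is mapped back with
 * gdb_signal_to_target(5) == TARGET_SIGTRAP.  Signals without an entry
 * yield GDB_SIGNAL_UNKNOWN in one direction and -1 in the other.
 */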
271 //#define DEBUG_GDB
273 typedef struct GDBRegisterState {
274 int base_reg;
275 int num_regs;
276 gdb_reg_cb get_reg;
277 gdb_reg_cb set_reg;
278 const char *xml;
279 struct GDBRegisterState *next;
280 } GDBRegisterState;
282 enum RSState {
283 RS_INACTIVE,
284 RS_IDLE,
285 RS_GETLINE,
286 RS_CHKSUM1,
287 RS_CHKSUM2,
289 typedef struct GDBState {
290 CPUArchState *c_cpu; /* current CPU for step/continue ops */
291 CPUArchState *g_cpu; /* current CPU for other ops */
292 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
293 enum RSState state; /* parsing state */
294 char line_buf[MAX_PACKET_LENGTH];
295 int line_buf_index;
296 int line_csum;
297 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
298 int last_packet_len;
299 int signal;
300 #ifdef CONFIG_USER_ONLY
301 int fd;
302 int running_state;
303 #else
304 CharDriverState *chr;
305 CharDriverState *mon_chr;
306 #endif
307 char syscall_buf[256];
308 gdb_syscall_complete_cb current_syscall_cb;
309 } GDBState;
311 /* By default use no IRQs and no timers while single stepping so as to
312 * make single stepping like an ICE HW step.
314 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
316 static GDBState *gdbserver_state;
318 /* This is an ugly hack to cope with both new and old gdb.
319 If gdb sends qXfer:features:read then assume we're talking to a newish
320 gdb that understands target descriptions. */
321 static int gdb_has_xml;
323 #ifdef CONFIG_USER_ONLY
324 /* XXX: This is not thread safe. Do we care? */
325 static int gdbserver_fd = -1;
327 static int get_char(GDBState *s)
329 uint8_t ch;
330 int ret;
332 for(;;) {
333 ret = qemu_recv(s->fd, &ch, 1, 0);
334 if (ret < 0) {
335 if (errno == ECONNRESET)
336 s->fd = -1;
337 if (errno != EINTR && errno != EAGAIN)
338 return -1;
339 } else if (ret == 0) {
340 close(s->fd);
341 s->fd = -1;
342 return -1;
343 } else {
344 break;
347 return ch;
349 #endif
static enum {
    GDB_SYS_UNKNOWN,
    GDB_SYS_ENABLED,
    GDB_SYS_DISABLED,
} gdb_syscall_mode;

/* If gdb is connected when the first semihosting syscall occurs then use
   remote gdb syscalls.  Otherwise use native file IO.  */
int use_gdb_syscalls(void)
{
    if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
        gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
                                            : GDB_SYS_DISABLED);
    }
    return gdb_syscall_mode == GDB_SYS_ENABLED;
}
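
/*
 * Illustration only (the callers live in the per-target semihosting code,
 * not in this file): a file-I/O handler branches on this latch once and the
 * decision then sticks for the rest of the run:
 *
 *     if (use_gdb_syscalls()) {
 *         ... forward the operation to the attached gdb (File-I/O protocol) ...
 *     } else {
 *         ... perform the operation with host file I/O ...
 *     }
 *
 * Because gdb_syscall_mode is latched at the first semihosting call,
 * attaching gdb later does not switch an already-running guest over to
 * gdb syscalls.
 */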
368 /* Resume execution. */
369 static inline void gdb_continue(GDBState *s)
371 #ifdef CONFIG_USER_ONLY
372 s->running_state = 1;
373 #else
374 vm_start();
375 #endif
378 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
380 #ifdef CONFIG_USER_ONLY
381 int ret;
383 while (len > 0) {
384 ret = send(s->fd, buf, len, 0);
385 if (ret < 0) {
386 if (errno != EINTR && errno != EAGAIN)
387 return;
388 } else {
389 buf += ret;
390 len -= ret;
393 #else
394 qemu_chr_fe_write(s->chr, buf, len);
395 #endif
398 static inline int fromhex(int v)
400 if (v >= '0' && v <= '9')
401 return v - '0';
402 else if (v >= 'A' && v <= 'F')
403 return v - 'A' + 10;
404 else if (v >= 'a' && v <= 'f')
405 return v - 'a' + 10;
406 else
407 return 0;
410 static inline int tohex(int v)
412 if (v < 10)
413 return v + '0';
414 else
415 return v - 10 + 'a';
static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;
    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}

static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
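
/*
 * Example (illustration only): the three bytes { 0x4f, 0x4b, 0x00 } are
 * transmitted as the six ASCII characters "4f4b00":
 *
 *     uint8_t raw[3] = { 0x4f, 0x4b, 0x00 };
 *     char hex[2 * 3 + 1];
 *     memtohex(hex, raw, 3);      -> hex now holds "4f4b00"
 *     hextomem(raw, hex, 3);      -> inverse conversion
 *
 * Both helpers assume the caller sized the buffers appropriately
 * (memtohex writes 2 * len characters plus a trailing NUL).
 */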
/* return -1 if error, 0 if OK */
static int put_packet_binary(GDBState *s, const char *buf, int len)
{
    int csum, i;
    uint8_t *p;

    for(;;) {
        p = s->last_packet;
        *(p++) = '$';
        memcpy(p, buf, len);
        p += len;
        csum = 0;
        for(i = 0; i < len; i++) {
            csum += buf[i];
        }
        *(p++) = '#';
        *(p++) = tohex((csum >> 4) & 0xf);
        *(p++) = tohex((csum) & 0xf);

        s->last_packet_len = p - s->last_packet;
        put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);

#ifdef CONFIG_USER_ONLY
        i = get_char(s);
        if (i < 0)
            return -1;
        if (i == '+')
            break;
#else
        break;
#endif
    }
    return 0;
}

/* return -1 if error, 0 if OK */
static int put_packet(GDBState *s, const char *buf)
{
#ifdef DEBUG_GDB
    printf("reply='%s'\n", buf);
#endif

    return put_packet_binary(s, buf, strlen(buf));
}
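
/*
 * Framing example (illustration only): put_packet(s, "OK") transmits
 *
 *     $OK#9a
 *
 * where 0x9a is the low byte of the sum of the payload characters
 * ('O' + 'K' = 0x4f + 0x4b = 0x9a), sent as two hex digits.  In user-mode
 * the stub then waits for gdb's '+' acknowledgement and resends the saved
 * last_packet on anything else; in system mode the chardev is treated as
 * reliable and no acknowledgement is awaited.
 */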
/* The GDB remote protocol transfers values in target byte order.  This means
   we can use the raw memory access routines to access the value buffer.
   Conveniently, these also handle the case where the buffer is mis-aligned.
 */
#define GET_REG8(val) do { \
    stb_p(mem_buf, val); \
    return 1; \
    } while(0)
#define GET_REG16(val) do { \
    stw_p(mem_buf, val); \
    return 2; \
    } while(0)
#define GET_REG32(val) do { \
    stl_p(mem_buf, val); \
    return 4; \
    } while(0)
#define GET_REG64(val) do { \
    stq_p(mem_buf, val); \
    return 8; \
    } while(0)

#if TARGET_LONG_BITS == 64
#define GET_REGL(val) GET_REG64(val)
#define ldtul_p(addr) ldq_p(addr)
#else
#define GET_REGL(val) GET_REG32(val)
#define ldtul_p(addr) ldl_p(addr)
#endif
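
/*
 * Usage sketch: the per-target cpu_gdb_read_register() implementations below
 * expand these macros in place of an explicit store-and-return, e.g.
 *
 *     case 64: GET_REGL(env->nip);
 *
 * stores the value into mem_buf in target byte order and returns its size
 * (4 or 8 bytes depending on TARGET_LONG_BITS), which is why the switch
 * cases that use it need no break statement.
 */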
515 #if defined(TARGET_I386)
517 #ifdef TARGET_X86_64
518 static const int gpr_map[16] = {
519 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
520 8, 9, 10, 11, 12, 13, 14, 15
522 #else
523 #define gpr_map gpr_map32
524 #endif
525 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
527 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
529 #define IDX_IP_REG CPU_NB_REGS
530 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
531 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
532 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
533 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
534 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
536 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
538 if (n < CPU_NB_REGS) {
539 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
540 GET_REG64(env->regs[gpr_map[n]]);
541 } else if (n < CPU_NB_REGS32) {
542 GET_REG32(env->regs[gpr_map32[n]]);
544 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
545 #ifdef USE_X86LDOUBLE
546 /* FIXME: byteswap float values - after fixing fpregs layout. */
547 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
548 #else
549 memset(mem_buf, 0, 10);
550 #endif
551 return 10;
552 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
553 n -= IDX_XMM_REGS;
554 if (n < CPU_NB_REGS32 ||
555 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
556 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
557 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
558 return 16;
560 } else {
561 switch (n) {
562 case IDX_IP_REG:
563 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
564 GET_REG64(env->eip);
565 } else {
566 GET_REG32(env->eip);
568 case IDX_FLAGS_REG: GET_REG32(env->eflags);
570 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
571 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
572 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
573 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
574 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
575 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
577 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
578 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
579 (env->fpstt & 0x7) << 11);
580 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
581 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
582 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
583 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
584 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
585 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
587 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
590 return 0;
593 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
595 uint16_t selector = ldl_p(mem_buf);
597 if (selector != env->segs[sreg].selector) {
598 #if defined(CONFIG_USER_ONLY)
599 cpu_x86_load_seg(env, sreg, selector);
600 #else
601 unsigned int limit, flags;
602 target_ulong base;
604 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
605 base = selector << 4;
606 limit = 0xffff;
607 flags = 0;
608 } else {
609 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
610 return 4;
612 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
613 #endif
615 return 4;
618 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
620 uint32_t tmp;
622 if (n < CPU_NB_REGS) {
623 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
624 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
625 return sizeof(target_ulong);
626 } else if (n < CPU_NB_REGS32) {
627 n = gpr_map32[n];
628 env->regs[n] &= ~0xffffffffUL;
629 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
630 return 4;
632 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
633 #ifdef USE_X86LDOUBLE
634 /* FIXME: byteswap float values - after fixing fpregs layout. */
635 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
636 #endif
637 return 10;
638 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
639 n -= IDX_XMM_REGS;
640 if (n < CPU_NB_REGS32 ||
641 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
642 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
643 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
644 return 16;
646 } else {
647 switch (n) {
648 case IDX_IP_REG:
649 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
650 env->eip = ldq_p(mem_buf);
651 return 8;
652 } else {
653 env->eip &= ~0xffffffffUL;
654 env->eip |= (uint32_t)ldl_p(mem_buf);
655 return 4;
657 case IDX_FLAGS_REG:
658 env->eflags = ldl_p(mem_buf);
659 return 4;
661 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
662 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
663 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
664 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
665 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
666 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
668 case IDX_FP_REGS + 8:
669 env->fpuc = ldl_p(mem_buf);
670 return 4;
671 case IDX_FP_REGS + 9:
672 tmp = ldl_p(mem_buf);
673 env->fpstt = (tmp >> 11) & 7;
674 env->fpus = tmp & ~0x3800;
675 return 4;
676 case IDX_FP_REGS + 10: /* ftag */ return 4;
677 case IDX_FP_REGS + 11: /* fiseg */ return 4;
678 case IDX_FP_REGS + 12: /* fioff */ return 4;
679 case IDX_FP_REGS + 13: /* foseg */ return 4;
680 case IDX_FP_REGS + 14: /* fooff */ return 4;
681 case IDX_FP_REGS + 15: /* fop */ return 4;
683 case IDX_MXCSR_REG:
684 env->mxcsr = ldl_p(mem_buf);
685 return 4;
688 /* Unrecognised register. */
689 return 0;
692 #elif defined (TARGET_PPC)
694 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
695 expects whatever the target description contains. Due to a
696 historical mishap the FP registers appear in between core integer
697 regs and PC, MSR, CR, and so forth. We hack round this by giving the
698 FP regs zero size when talking to a newer gdb. */
699 #define NUM_CORE_REGS 71
700 #if defined (TARGET_PPC64)
701 #define GDB_CORE_XML "power64-core.xml"
702 #else
703 #define GDB_CORE_XML "power-core.xml"
704 #endif
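
/*
 * Resulting "g"-packet layout for PPC (derived from the reader below):
 * registers 0-31 are the GPRs, 32-63 the FPRs (reported with zero size once
 * gdb has fetched the XML description), then 64 = NIP, 65 = MSR, 66 = CR,
 * 67 = LR, 68 = CTR, 69 = XER and 70 = FPSCR, giving NUM_CORE_REGS = 71.
 */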
706 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
708 if (n < 32) {
709 /* gprs */
710 GET_REGL(env->gpr[n]);
711 } else if (n < 64) {
712 /* fprs */
713 if (gdb_has_xml)
714 return 0;
715 stfq_p(mem_buf, env->fpr[n-32]);
716 return 8;
717 } else {
718 switch (n) {
719 case 64: GET_REGL(env->nip);
720 case 65: GET_REGL(env->msr);
721 case 66:
723 uint32_t cr = 0;
724 int i;
725 for (i = 0; i < 8; i++)
726 cr |= env->crf[i] << (32 - ((i + 1) * 4));
727 GET_REG32(cr);
729 case 67: GET_REGL(env->lr);
730 case 68: GET_REGL(env->ctr);
731 case 69: GET_REGL(env->xer);
732 case 70:
734 if (gdb_has_xml)
735 return 0;
736 GET_REG32(env->fpscr);
740 return 0;
743 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
745 if (n < 32) {
746 /* gprs */
747 env->gpr[n] = ldtul_p(mem_buf);
748 return sizeof(target_ulong);
749 } else if (n < 64) {
750 /* fprs */
751 if (gdb_has_xml)
752 return 0;
753 env->fpr[n-32] = ldfq_p(mem_buf);
754 return 8;
755 } else {
756 switch (n) {
757 case 64:
758 env->nip = ldtul_p(mem_buf);
759 return sizeof(target_ulong);
760 case 65:
761 ppc_store_msr(env, ldtul_p(mem_buf));
762 return sizeof(target_ulong);
763 case 66:
765 uint32_t cr = ldl_p(mem_buf);
766 int i;
767 for (i = 0; i < 8; i++)
768 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
769 return 4;
771 case 67:
772 env->lr = ldtul_p(mem_buf);
773 return sizeof(target_ulong);
774 case 68:
775 env->ctr = ldtul_p(mem_buf);
776 return sizeof(target_ulong);
777 case 69:
778 env->xer = ldtul_p(mem_buf);
779 return sizeof(target_ulong);
780 case 70:
781 /* fpscr */
782 if (gdb_has_xml)
783 return 0;
784 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
785 return sizeof(target_ulong);
788 return 0;
791 #elif defined (TARGET_SPARC)
793 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
794 #define NUM_CORE_REGS 86
795 #else
796 #define NUM_CORE_REGS 72
797 #endif
799 #ifdef TARGET_ABI32
800 #define GET_REGA(val) GET_REG32(val)
801 #else
802 #define GET_REGA(val) GET_REGL(val)
803 #endif
805 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
807 if (n < 8) {
808 /* g0..g7 */
809 GET_REGA(env->gregs[n]);
811 if (n < 32) {
812 /* register window */
813 GET_REGA(env->regwptr[n - 8]);
815 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
816 if (n < 64) {
817 /* fprs */
818 if (n & 1) {
819 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
820 } else {
821 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
824 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
825 switch (n) {
826 case 64: GET_REGA(env->y);
827 case 65: GET_REGA(cpu_get_psr(env));
828 case 66: GET_REGA(env->wim);
829 case 67: GET_REGA(env->tbr);
830 case 68: GET_REGA(env->pc);
831 case 69: GET_REGA(env->npc);
832 case 70: GET_REGA(env->fsr);
833 case 71: GET_REGA(0); /* csr */
834 default: GET_REGA(0);
836 #else
837 if (n < 64) {
838 /* f0-f31 */
839 if (n & 1) {
840 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
841 } else {
842 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
845 if (n < 80) {
846 /* f32-f62 (double width, even numbers only) */
847 GET_REG64(env->fpr[(n - 32) / 2].ll);
849 switch (n) {
850 case 80: GET_REGL(env->pc);
851 case 81: GET_REGL(env->npc);
852 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
853 ((env->asi & 0xff) << 24) |
854 ((env->pstate & 0xfff) << 8) |
855 cpu_get_cwp64(env));
856 case 83: GET_REGL(env->fsr);
857 case 84: GET_REGL(env->fprs);
858 case 85: GET_REGL(env->y);
860 #endif
861 return 0;
864 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
866 #if defined(TARGET_ABI32)
867 abi_ulong tmp;
869 tmp = ldl_p(mem_buf);
870 #else
871 target_ulong tmp;
873 tmp = ldtul_p(mem_buf);
874 #endif
876 if (n < 8) {
877 /* g0..g7 */
878 env->gregs[n] = tmp;
879 } else if (n < 32) {
880 /* register window */
881 env->regwptr[n - 8] = tmp;
883 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
884 else if (n < 64) {
885 /* fprs */
886 /* f0-f31 */
887 if (n & 1) {
888 env->fpr[(n - 32) / 2].l.lower = tmp;
889 } else {
890 env->fpr[(n - 32) / 2].l.upper = tmp;
892 } else {
893 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
894 switch (n) {
895 case 64: env->y = tmp; break;
896 case 65: cpu_put_psr(env, tmp); break;
897 case 66: env->wim = tmp; break;
898 case 67: env->tbr = tmp; break;
899 case 68: env->pc = tmp; break;
900 case 69: env->npc = tmp; break;
901 case 70: env->fsr = tmp; break;
902 default: return 0;
905 return 4;
906 #else
907 else if (n < 64) {
908 /* f0-f31 */
909 tmp = ldl_p(mem_buf);
910 if (n & 1) {
911 env->fpr[(n - 32) / 2].l.lower = tmp;
912 } else {
913 env->fpr[(n - 32) / 2].l.upper = tmp;
915 return 4;
916 } else if (n < 80) {
917 /* f32-f62 (double width, even numbers only) */
918 env->fpr[(n - 32) / 2].ll = tmp;
919 } else {
920 switch (n) {
921 case 80: env->pc = tmp; break;
922 case 81: env->npc = tmp; break;
923 case 82:
924 cpu_put_ccr(env, tmp >> 32);
925 env->asi = (tmp >> 24) & 0xff;
926 env->pstate = (tmp >> 8) & 0xfff;
927 cpu_put_cwp64(env, tmp & 0xff);
928 break;
929 case 83: env->fsr = tmp; break;
930 case 84: env->fprs = tmp; break;
931 case 85: env->y = tmp; break;
932 default: return 0;
935 return 8;
936 #endif
#elif defined (TARGET_ARM)

/* Old gdb always expects FPA registers.  Newer (xml-aware) gdb only expects
   whatever the target description contains.  Due to a historical mishap
   the FPA registers appear in between core integer regs and the CPSR.
   We hack round this by giving the FPA regs zero size when talking to a
   newer gdb.  */
#define NUM_CORE_REGS 26
#define GDB_CORE_XML "arm-core.xml"
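
/*
 * Resulting "g"-packet layout for ARM (derived from the reader below):
 * registers 0-15 are r0-r15, 16-23 the legacy 12-byte FPA registers
 * (sent as zeroes, or with zero size once gdb has fetched the XML),
 * 24 the FPA status register and 25 the CPSR, giving NUM_CORE_REGS = 26.
 */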
948 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
950 if (n < 16) {
951 /* Core integer register. */
952 GET_REG32(env->regs[n]);
954 if (n < 24) {
955 /* FPA registers. */
956 if (gdb_has_xml)
957 return 0;
958 memset(mem_buf, 0, 12);
959 return 12;
961 switch (n) {
962 case 24:
963 /* FPA status register. */
964 if (gdb_has_xml)
965 return 0;
966 GET_REG32(0);
967 case 25:
968 /* CPSR */
969 GET_REG32(cpsr_read(env));
971 /* Unknown register. */
972 return 0;
975 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
977 uint32_t tmp;
979 tmp = ldl_p(mem_buf);
    /* Mask out low bit of PC to work around gdb bugs.  This will probably
       cause problems if we ever implement the Jazelle DBX extensions.  */
983 if (n == 15)
984 tmp &= ~1;
986 if (n < 16) {
987 /* Core integer register. */
988 env->regs[n] = tmp;
989 return 4;
991 if (n < 24) { /* 16-23 */
992 /* FPA registers (ignored). */
993 if (gdb_has_xml)
994 return 0;
995 return 12;
997 switch (n) {
998 case 24:
999 /* FPA status register (ignored). */
1000 if (gdb_has_xml)
1001 return 0;
1002 return 4;
1003 case 25:
1004 /* CPSR */
1005 cpsr_write (env, tmp, 0xffffffff);
1006 return 4;
1008 /* Unknown register. */
1009 return 0;
1012 #elif defined (TARGET_M68K)
1014 #define NUM_CORE_REGS 18
1016 #define GDB_CORE_XML "cf-core.xml"
1018 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1020 if (n < 8) {
1021 /* D0-D7 */
1022 GET_REG32(env->dregs[n]);
1023 } else if (n < 16) {
1024 /* A0-A7 */
1025 GET_REG32(env->aregs[n - 8]);
1026 } else {
1027 switch (n) {
1028 case 16: GET_REG32(env->sr);
1029 case 17: GET_REG32(env->pc);
1032 /* FP registers not included here because they vary between
1033 ColdFire and m68k. Use XML bits for these. */
1034 return 0;
1037 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1039 uint32_t tmp;
1041 tmp = ldl_p(mem_buf);
1043 if (n < 8) {
1044 /* D0-D7 */
1045 env->dregs[n] = tmp;
1046 } else if (n < 16) {
1047 /* A0-A7 */
1048 env->aregs[n - 8] = tmp;
1049 } else {
1050 switch (n) {
1051 case 16: env->sr = tmp; break;
1052 case 17: env->pc = tmp; break;
1053 default: return 0;
1056 return 4;
1058 #elif defined (TARGET_MIPS)
1060 #define NUM_CORE_REGS 73
1062 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1064 if (n < 32) {
1065 GET_REGL(env->active_tc.gpr[n]);
1067 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1068 if (n >= 38 && n < 70) {
1069 if (env->CP0_Status & (1 << CP0St_FR))
1070 GET_REGL(env->active_fpu.fpr[n - 38].d);
1071 else
1072 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1074 switch (n) {
1075 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1076 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1079 switch (n) {
1080 case 32: GET_REGL((int32_t)env->CP0_Status);
1081 case 33: GET_REGL(env->active_tc.LO[0]);
1082 case 34: GET_REGL(env->active_tc.HI[0]);
1083 case 35: GET_REGL(env->CP0_BadVAddr);
1084 case 36: GET_REGL((int32_t)env->CP0_Cause);
1085 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1086 case 72: GET_REGL(0); /* fp */
1087 case 89: GET_REGL((int32_t)env->CP0_PRid);
1089 if (n >= 73 && n <= 88) {
1090 /* 16 embedded regs. */
1091 GET_REGL(0);
1094 return 0;
1097 /* convert MIPS rounding mode in FCR31 to IEEE library */
1098 static unsigned int ieee_rm[] =
1100 float_round_nearest_even,
1101 float_round_to_zero,
1102 float_round_up,
1103 float_round_down
1105 #define RESTORE_ROUNDING_MODE \
1106 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1108 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1110 target_ulong tmp;
1112 tmp = ldtul_p(mem_buf);
1114 if (n < 32) {
1115 env->active_tc.gpr[n] = tmp;
1116 return sizeof(target_ulong);
1118 if (env->CP0_Config1 & (1 << CP0C1_FP)
1119 && n >= 38 && n < 73) {
1120 if (n < 70) {
1121 if (env->CP0_Status & (1 << CP0St_FR))
1122 env->active_fpu.fpr[n - 38].d = tmp;
1123 else
1124 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1126 switch (n) {
1127 case 70:
1128 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1129 /* set rounding mode */
1130 RESTORE_ROUNDING_MODE;
1131 break;
1132 case 71: env->active_fpu.fcr0 = tmp; break;
1134 return sizeof(target_ulong);
1136 switch (n) {
1137 case 32: env->CP0_Status = tmp; break;
1138 case 33: env->active_tc.LO[0] = tmp; break;
1139 case 34: env->active_tc.HI[0] = tmp; break;
1140 case 35: env->CP0_BadVAddr = tmp; break;
1141 case 36: env->CP0_Cause = tmp; break;
1142 case 37:
1143 env->active_tc.PC = tmp & ~(target_ulong)1;
1144 if (tmp & 1) {
1145 env->hflags |= MIPS_HFLAG_M16;
1146 } else {
1147 env->hflags &= ~(MIPS_HFLAG_M16);
1149 break;
1150 case 72: /* fp, ignored */ break;
1151 default:
1152 if (n > 89)
1153 return 0;
1154 /* Other registers are readonly. Ignore writes. */
1155 break;
1158 return sizeof(target_ulong);
1160 #elif defined(TARGET_OPENRISC)
1162 #define NUM_CORE_REGS (32 + 3)
1164 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1166 if (n < 32) {
1167 GET_REG32(env->gpr[n]);
1168 } else {
1169 switch (n) {
1170 case 32: /* PPC */
1171 GET_REG32(env->ppc);
1172 break;
1174 case 33: /* NPC */
1175 GET_REG32(env->npc);
1176 break;
1178 case 34: /* SR */
1179 GET_REG32(env->sr);
1180 break;
1182 default:
1183 break;
1186 return 0;
1189 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1190 uint8_t *mem_buf, int n)
1192 uint32_t tmp;
1194 if (n > NUM_CORE_REGS) {
1195 return 0;
1198 tmp = ldl_p(mem_buf);
1200 if (n < 32) {
1201 env->gpr[n] = tmp;
1202 } else {
1203 switch (n) {
1204 case 32: /* PPC */
1205 env->ppc = tmp;
1206 break;
1208 case 33: /* NPC */
1209 env->npc = tmp;
1210 break;
1212 case 34: /* SR */
1213 env->sr = tmp;
1214 break;
1216 default:
1217 break;
1220 return 4;
1222 #elif defined (TARGET_SH4)
1224 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1225 /* FIXME: We should use XML for this. */
1227 #define NUM_CORE_REGS 59
1229 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1231 switch (n) {
1232 case 0 ... 7:
1233 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1234 GET_REGL(env->gregs[n + 16]);
1235 } else {
1236 GET_REGL(env->gregs[n]);
1238 case 8 ... 15:
1239 GET_REGL(env->gregs[n]);
1240 case 16:
1241 GET_REGL(env->pc);
1242 case 17:
1243 GET_REGL(env->pr);
1244 case 18:
1245 GET_REGL(env->gbr);
1246 case 19:
1247 GET_REGL(env->vbr);
1248 case 20:
1249 GET_REGL(env->mach);
1250 case 21:
1251 GET_REGL(env->macl);
1252 case 22:
1253 GET_REGL(env->sr);
1254 case 23:
1255 GET_REGL(env->fpul);
1256 case 24:
1257 GET_REGL(env->fpscr);
1258 case 25 ... 40:
1259 if (env->fpscr & FPSCR_FR) {
1260 stfl_p(mem_buf, env->fregs[n - 9]);
1261 } else {
1262 stfl_p(mem_buf, env->fregs[n - 25]);
1264 return 4;
1265 case 41:
1266 GET_REGL(env->ssr);
1267 case 42:
1268 GET_REGL(env->spc);
1269 case 43 ... 50:
1270 GET_REGL(env->gregs[n - 43]);
1271 case 51 ... 58:
1272 GET_REGL(env->gregs[n - (51 - 16)]);
1275 return 0;
1278 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1280 switch (n) {
1281 case 0 ... 7:
1282 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1283 env->gregs[n + 16] = ldl_p(mem_buf);
1284 } else {
1285 env->gregs[n] = ldl_p(mem_buf);
1287 break;
1288 case 8 ... 15:
1289 env->gregs[n] = ldl_p(mem_buf);
1290 break;
1291 case 16:
1292 env->pc = ldl_p(mem_buf);
1293 break;
1294 case 17:
1295 env->pr = ldl_p(mem_buf);
1296 break;
1297 case 18:
1298 env->gbr = ldl_p(mem_buf);
1299 break;
1300 case 19:
1301 env->vbr = ldl_p(mem_buf);
1302 break;
1303 case 20:
1304 env->mach = ldl_p(mem_buf);
1305 break;
1306 case 21:
1307 env->macl = ldl_p(mem_buf);
1308 break;
1309 case 22:
1310 env->sr = ldl_p(mem_buf);
1311 break;
1312 case 23:
1313 env->fpul = ldl_p(mem_buf);
1314 break;
1315 case 24:
1316 env->fpscr = ldl_p(mem_buf);
1317 break;
1318 case 25 ... 40:
1319 if (env->fpscr & FPSCR_FR) {
1320 env->fregs[n - 9] = ldfl_p(mem_buf);
1321 } else {
1322 env->fregs[n - 25] = ldfl_p(mem_buf);
1324 break;
1325 case 41:
1326 env->ssr = ldl_p(mem_buf);
1327 break;
1328 case 42:
1329 env->spc = ldl_p(mem_buf);
1330 break;
1331 case 43 ... 50:
1332 env->gregs[n - 43] = ldl_p(mem_buf);
1333 break;
1334 case 51 ... 58:
1335 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1336 break;
1337 default: return 0;
1340 return 4;
1342 #elif defined (TARGET_MICROBLAZE)
1344 #define NUM_CORE_REGS (32 + 5)
1346 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1348 if (n < 32) {
1349 GET_REG32(env->regs[n]);
1350 } else {
1351 GET_REG32(env->sregs[n - 32]);
1353 return 0;
1356 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1358 uint32_t tmp;
1360 if (n > NUM_CORE_REGS)
1361 return 0;
1363 tmp = ldl_p(mem_buf);
1365 if (n < 32) {
1366 env->regs[n] = tmp;
1367 } else {
1368 env->sregs[n - 32] = tmp;
1370 return 4;
1372 #elif defined (TARGET_CRIS)
1374 #define NUM_CORE_REGS 49
1376 static int
1377 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1379 if (n < 15) {
1380 GET_REG32(env->regs[n]);
1383 if (n == 15) {
1384 GET_REG32(env->pc);
1387 if (n < 32) {
1388 switch (n) {
1389 case 16:
1390 GET_REG8(env->pregs[n - 16]);
1391 break;
1392 case 17:
1393 GET_REG8(env->pregs[n - 16]);
1394 break;
1395 case 20:
1396 case 21:
1397 GET_REG16(env->pregs[n - 16]);
1398 break;
1399 default:
1400 if (n >= 23) {
1401 GET_REG32(env->pregs[n - 16]);
1403 break;
1406 return 0;
1409 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1411 uint8_t srs;
1413 if (env->pregs[PR_VR] < 32)
1414 return read_register_crisv10(env, mem_buf, n);
1416 srs = env->pregs[PR_SRS];
1417 if (n < 16) {
1418 GET_REG32(env->regs[n]);
1421 if (n >= 21 && n < 32) {
1422 GET_REG32(env->pregs[n - 16]);
1424 if (n >= 33 && n < 49) {
1425 GET_REG32(env->sregs[srs][n - 33]);
1427 switch (n) {
1428 case 16: GET_REG8(env->pregs[0]);
1429 case 17: GET_REG8(env->pregs[1]);
1430 case 18: GET_REG32(env->pregs[2]);
1431 case 19: GET_REG8(srs);
1432 case 20: GET_REG16(env->pregs[4]);
1433 case 32: GET_REG32(env->pc);
1436 return 0;
1439 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1441 uint32_t tmp;
1443 if (n > 49)
1444 return 0;
1446 tmp = ldl_p(mem_buf);
1448 if (n < 16) {
1449 env->regs[n] = tmp;
1452 if (n >= 21 && n < 32) {
1453 env->pregs[n - 16] = tmp;
1456 /* FIXME: Should support function regs be writable? */
1457 switch (n) {
1458 case 16: return 1;
1459 case 17: return 1;
1460 case 18: env->pregs[PR_PID] = tmp; break;
1461 case 19: return 1;
1462 case 20: return 2;
1463 case 32: env->pc = tmp; break;
1466 return 4;
1468 #elif defined (TARGET_ALPHA)
1470 #define NUM_CORE_REGS 67
1472 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1474 uint64_t val;
1475 CPU_DoubleU d;
1477 switch (n) {
1478 case 0 ... 30:
1479 val = env->ir[n];
1480 break;
1481 case 32 ... 62:
1482 d.d = env->fir[n - 32];
1483 val = d.ll;
1484 break;
1485 case 63:
1486 val = cpu_alpha_load_fpcr(env);
1487 break;
1488 case 64:
1489 val = env->pc;
1490 break;
1491 case 66:
1492 val = env->unique;
1493 break;
1494 case 31:
1495 case 65:
1496 /* 31 really is the zero register; 65 is unassigned in the
1497 gdb protocol, but is still required to occupy 8 bytes. */
1498 val = 0;
1499 break;
1500 default:
1501 return 0;
1503 GET_REGL(val);
1506 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1508 target_ulong tmp = ldtul_p(mem_buf);
1509 CPU_DoubleU d;
1511 switch (n) {
1512 case 0 ... 30:
1513 env->ir[n] = tmp;
1514 break;
1515 case 32 ... 62:
1516 d.ll = tmp;
1517 env->fir[n - 32] = d.d;
1518 break;
1519 case 63:
1520 cpu_alpha_store_fpcr(env, tmp);
1521 break;
1522 case 64:
1523 env->pc = tmp;
1524 break;
1525 case 66:
1526 env->unique = tmp;
1527 break;
1528 case 31:
1529 case 65:
1530 /* 31 really is the zero register; 65 is unassigned in the
1531 gdb protocol, but is still required to occupy 8 bytes. */
1532 break;
1533 default:
1534 return 0;
1536 return 8;
1538 #elif defined (TARGET_S390X)
1540 #define NUM_CORE_REGS S390_NUM_REGS
1542 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1544 uint64_t val;
1545 int cc_op;
1547 switch (n) {
1548 case S390_PSWM_REGNUM:
1549 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1550 val = deposit64(env->psw.mask, 44, 2, cc_op);
1551 GET_REGL(val);
1552 break;
1553 case S390_PSWA_REGNUM:
1554 GET_REGL(env->psw.addr);
1555 break;
1556 case S390_R0_REGNUM ... S390_R15_REGNUM:
1557 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1558 break;
1559 case S390_A0_REGNUM ... S390_A15_REGNUM:
1560 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1561 break;
1562 case S390_FPC_REGNUM:
1563 GET_REG32(env->fpc);
1564 break;
1565 case S390_F0_REGNUM ... S390_F15_REGNUM:
1566 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1567 break;
1570 return 0;
1573 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1575 target_ulong tmpl;
1576 uint32_t tmp32;
1577 int r = 8;
1578 tmpl = ldtul_p(mem_buf);
1579 tmp32 = ldl_p(mem_buf);
1581 switch (n) {
1582 case S390_PSWM_REGNUM:
1583 env->psw.mask = tmpl;
1584 env->cc_op = extract64(tmpl, 44, 2);
1585 break;
1586 case S390_PSWA_REGNUM:
1587 env->psw.addr = tmpl;
1588 break;
1589 case S390_R0_REGNUM ... S390_R15_REGNUM:
1590 env->regs[n-S390_R0_REGNUM] = tmpl;
1591 break;
1592 case S390_A0_REGNUM ... S390_A15_REGNUM:
1593 env->aregs[n-S390_A0_REGNUM] = tmp32;
1594 r = 4;
1595 break;
1596 case S390_FPC_REGNUM:
1597 env->fpc = tmp32;
1598 r = 4;
1599 break;
1600 case S390_F0_REGNUM ... S390_F15_REGNUM:
1601 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1602 break;
1603 default:
1604 return 0;
1606 return r;
1608 #elif defined (TARGET_LM32)
1610 #include "hw/lm32_pic.h"
1611 #define NUM_CORE_REGS (32 + 7)
1613 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1615 if (n < 32) {
1616 GET_REG32(env->regs[n]);
1617 } else {
1618 switch (n) {
1619 case 32:
1620 GET_REG32(env->pc);
1621 break;
1622 /* FIXME: put in right exception ID */
1623 case 33:
1624 GET_REG32(0);
1625 break;
1626 case 34:
1627 GET_REG32(env->eba);
1628 break;
1629 case 35:
1630 GET_REG32(env->deba);
1631 break;
1632 case 36:
1633 GET_REG32(env->ie);
1634 break;
1635 case 37:
1636 GET_REG32(lm32_pic_get_im(env->pic_state));
1637 break;
1638 case 38:
1639 GET_REG32(lm32_pic_get_ip(env->pic_state));
1640 break;
1643 return 0;
1646 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1648 uint32_t tmp;
1650 if (n > NUM_CORE_REGS) {
1651 return 0;
1654 tmp = ldl_p(mem_buf);
1656 if (n < 32) {
1657 env->regs[n] = tmp;
1658 } else {
1659 switch (n) {
1660 case 32:
1661 env->pc = tmp;
1662 break;
1663 case 34:
1664 env->eba = tmp;
1665 break;
1666 case 35:
1667 env->deba = tmp;
1668 break;
1669 case 36:
1670 env->ie = tmp;
1671 break;
1672 case 37:
1673 lm32_pic_set_im(env->pic_state, tmp);
1674 break;
1675 case 38:
1676 lm32_pic_set_ip(env->pic_state, tmp);
1677 break;
1680 return 4;
1682 #elif defined(TARGET_XTENSA)
1684 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1685 * Use num_regs to see all registers. gdb modification is required for that:
1686 * reset bit 0 in the 'flags' field of the registers definitions in the
1687 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1689 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1690 #define num_g_regs NUM_CORE_REGS
1692 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1694 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1696 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1697 return 0;
1700 switch (reg->type) {
1701 case 9: /*pc*/
1702 GET_REG32(env->pc);
1703 break;
1705 case 1: /*ar*/
1706 xtensa_sync_phys_from_window(env);
1707 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1708 break;
1710 case 2: /*SR*/
1711 GET_REG32(env->sregs[reg->targno & 0xff]);
1712 break;
1714 case 3: /*UR*/
1715 GET_REG32(env->uregs[reg->targno & 0xff]);
1716 break;
1718 case 4: /*f*/
1719 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1720 break;
1722 case 8: /*a*/
1723 GET_REG32(env->regs[reg->targno & 0x0f]);
1724 break;
1726 default:
1727 qemu_log("%s from reg %d of unsupported type %d\n",
1728 __func__, n, reg->type);
1729 return 0;
1733 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1735 uint32_t tmp;
1736 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1738 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1739 return 0;
1742 tmp = ldl_p(mem_buf);
1744 switch (reg->type) {
1745 case 9: /*pc*/
1746 env->pc = tmp;
1747 break;
1749 case 1: /*ar*/
1750 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1751 xtensa_sync_window_from_phys(env);
1752 break;
1754 case 2: /*SR*/
1755 env->sregs[reg->targno & 0xff] = tmp;
1756 break;
1758 case 3: /*UR*/
1759 env->uregs[reg->targno & 0xff] = tmp;
1760 break;
1762 case 4: /*f*/
1763 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1764 break;
1766 case 8: /*a*/
1767 env->regs[reg->targno & 0x0f] = tmp;
1768 break;
1770 default:
1771 qemu_log("%s to reg %d of unsupported type %d\n",
1772 __func__, n, reg->type);
1773 return 0;
1776 return 4;
1778 #else
1780 #define NUM_CORE_REGS 0
1782 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1784 return 0;
1787 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1789 return 0;
1792 #endif
1794 #if !defined(TARGET_XTENSA)
1795 static int num_g_regs = NUM_CORE_REGS;
1796 #endif
#ifdef GDB_CORE_XML
/* Encode data using the encoding for 'x' packets.  */
static int memtox(char *buf, const char *mem, int len)
{
    char *p = buf;
    char c;

    while (len--) {
        c = *(mem++);
        switch (c) {
        case '#': case '$': case '*': case '}':
            *(p++) = '}';
            *(p++) = c ^ 0x20;
            break;
        default:
            *(p++) = c;
            break;
        }
    }
    return p - buf;
}
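
/*
 * Escaping example (illustration only): the characters that are special in
 * the remote protocol ('#', '$', '*', '}') are sent as '}' followed by the
 * character XORed with 0x20, so a literal '}' (0x7d) in the XML becomes the
 * two bytes 0x7d 0x5d ("}]") and '*' (0x2a) becomes 0x7d 0x0a.  Everything
 * else passes through unchanged; the return value is the number of bytes
 * written to buf, which may exceed len.
 */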
1820 static const char *get_feature_xml(const char *p, const char **newp)
1822 size_t len;
1823 int i;
1824 const char *name;
1825 static char target_xml[1024];
1827 len = 0;
1828 while (p[len] && p[len] != ':')
1829 len++;
1830 *newp = p + len;
1832 name = NULL;
1833 if (strncmp(p, "target.xml", len) == 0) {
1834 /* Generate the XML description for this CPU. */
1835 if (!target_xml[0]) {
1836 GDBRegisterState *r;
1838 snprintf(target_xml, sizeof(target_xml),
1839 "<?xml version=\"1.0\"?>"
1840 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1841 "<target>"
1842 "<xi:include href=\"%s\"/>",
1843 GDB_CORE_XML);
1845 for (r = first_cpu->gdb_regs; r; r = r->next) {
1846 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1847 pstrcat(target_xml, sizeof(target_xml), r->xml);
1848 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1850 pstrcat(target_xml, sizeof(target_xml), "</target>");
1852 return target_xml;
1854 for (i = 0; ; i++) {
1855 name = xml_builtin[i][0];
1856 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1857 break;
1859 return name ? xml_builtin[i][1] : NULL;
1861 #endif
1863 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1865 GDBRegisterState *r;
1867 if (reg < NUM_CORE_REGS)
1868 return cpu_gdb_read_register(env, mem_buf, reg);
1870 for (r = env->gdb_regs; r; r = r->next) {
1871 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1872 return r->get_reg(env, mem_buf, reg - r->base_reg);
1875 return 0;
1878 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1880 GDBRegisterState *r;
1882 if (reg < NUM_CORE_REGS)
1883 return cpu_gdb_write_register(env, mem_buf, reg);
1885 for (r = env->gdb_regs; r; r = r->next) {
1886 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1887 return r->set_reg(env, mem_buf, reg - r->base_reg);
1890 return 0;
1893 #if !defined(TARGET_XTENSA)
/* Register a supplemental set of CPU registers.  If g_pos is nonzero it
   specifies the first register number and these registers are included in
   a standard "g" packet.  Direction is relative to gdb, i.e. get_reg is
   gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
 */
1900 void gdb_register_coprocessor(CPUArchState * env,
1901 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1902 int num_regs, const char *xml, int g_pos)
1904 GDBRegisterState *s;
1905 GDBRegisterState **p;
1906 static int last_reg = NUM_CORE_REGS;
1908 p = &env->gdb_regs;
1909 while (*p) {
1910 /* Check for duplicates. */
1911 if (strcmp((*p)->xml, xml) == 0)
1912 return;
1913 p = &(*p)->next;
1916 s = g_new0(GDBRegisterState, 1);
1917 s->base_reg = last_reg;
1918 s->num_regs = num_regs;
1919 s->get_reg = get_reg;
1920 s->set_reg = set_reg;
1921 s->xml = xml;
1923 /* Add to end of list. */
1924 last_reg += num_regs;
1925 *p = s;
1926 if (g_pos) {
1927 if (g_pos != s->base_reg) {
1928 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1929 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1930 } else {
1931 num_g_regs = last_reg;
1935 #endif
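
/*
 * Usage sketch (hypothetical caller; the callback and file names are
 * assumptions, not part of this file): a target with an extra bank of four
 * registers described by "example-copro.xml" would register it per CPU as
 *
 *     gdb_register_coprocessor(env, copro_read_reg, copro_write_reg,
 *                              4, "example-copro.xml", 0);
 *
 * With g_pos == 0 the registers are reachable only through 'p'/'P' packets
 * and the qXfer:features:read description; a nonzero g_pos additionally
 * appends them to the "g" packet, provided it matches the base register
 * number assigned above.
 */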
1937 #ifndef CONFIG_USER_ONLY
1938 static const int xlat_gdb_type[] = {
1939 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1940 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1941 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1943 #endif
1945 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1947 CPUArchState *env;
1948 int err = 0;
1950 if (kvm_enabled())
1951 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1953 switch (type) {
1954 case GDB_BREAKPOINT_SW:
1955 case GDB_BREAKPOINT_HW:
1956 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1957 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1958 if (err)
1959 break;
1961 return err;
1962 #ifndef CONFIG_USER_ONLY
1963 case GDB_WATCHPOINT_WRITE:
1964 case GDB_WATCHPOINT_READ:
1965 case GDB_WATCHPOINT_ACCESS:
1966 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1967 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1968 NULL);
1969 if (err)
1970 break;
1972 return err;
1973 #endif
1974 default:
1975 return -ENOSYS;
1979 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1981 CPUArchState *env;
1982 int err = 0;
1984 if (kvm_enabled())
1985 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1987 switch (type) {
1988 case GDB_BREAKPOINT_SW:
1989 case GDB_BREAKPOINT_HW:
1990 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1991 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1992 if (err)
1993 break;
1995 return err;
1996 #ifndef CONFIG_USER_ONLY
1997 case GDB_WATCHPOINT_WRITE:
1998 case GDB_WATCHPOINT_READ:
1999 case GDB_WATCHPOINT_ACCESS:
2000 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2001 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2002 if (err)
2003 break;
2005 return err;
2006 #endif
2007 default:
2008 return -ENOSYS;
2012 static void gdb_breakpoint_remove_all(void)
2014 CPUArchState *env;
2016 if (kvm_enabled()) {
2017 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2018 return;
2021 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2022 cpu_breakpoint_remove_all(env, BP_GDB);
2023 #ifndef CONFIG_USER_ONLY
2024 cpu_watchpoint_remove_all(env, BP_GDB);
2025 #endif
2029 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2031 cpu_synchronize_state(s->c_cpu);
2032 #if defined(TARGET_I386)
2033 s->c_cpu->eip = pc;
2034 #elif defined (TARGET_PPC)
2035 s->c_cpu->nip = pc;
2036 #elif defined (TARGET_SPARC)
2037 s->c_cpu->pc = pc;
2038 s->c_cpu->npc = pc + 4;
2039 #elif defined (TARGET_ARM)
2040 s->c_cpu->regs[15] = pc;
2041 #elif defined (TARGET_SH4)
2042 s->c_cpu->pc = pc;
2043 #elif defined (TARGET_MIPS)
2044 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
2045 if (pc & 1) {
2046 s->c_cpu->hflags |= MIPS_HFLAG_M16;
2047 } else {
2048 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
2050 #elif defined (TARGET_MICROBLAZE)
2051 s->c_cpu->sregs[SR_PC] = pc;
2052 #elif defined(TARGET_OPENRISC)
2053 s->c_cpu->pc = pc;
2054 #elif defined (TARGET_CRIS)
2055 s->c_cpu->pc = pc;
2056 #elif defined (TARGET_ALPHA)
2057 s->c_cpu->pc = pc;
2058 #elif defined (TARGET_S390X)
2059 s->c_cpu->psw.addr = pc;
2060 #elif defined (TARGET_LM32)
2061 s->c_cpu->pc = pc;
2062 #elif defined(TARGET_XTENSA)
2063 s->c_cpu->pc = pc;
2064 #endif
2067 static CPUArchState *find_cpu(uint32_t thread_id)
2069 CPUArchState *env;
2070 CPUState *cpu;
2072 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2073 cpu = ENV_GET_CPU(env);
2074 if (cpu_index(cpu) == thread_id) {
2075 return env;
2079 return NULL;
2082 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2084 CPUArchState *env;
2085 const char *p;
2086 uint32_t thread;
2087 int ch, reg_size, type, res;
2088 char buf[MAX_PACKET_LENGTH];
2089 uint8_t mem_buf[MAX_PACKET_LENGTH];
2090 uint8_t *registers;
2091 target_ulong addr, len;
2093 #ifdef DEBUG_GDB
2094 printf("command='%s'\n", line_buf);
2095 #endif
2096 p = line_buf;
2097 ch = *p++;
2098 switch(ch) {
2099 case '?':
2100 /* TODO: Make this return the correct value for user-mode. */
2101 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2102 cpu_index(ENV_GET_CPU(s->c_cpu)));
2103 put_packet(s, buf);
        /* Remove all the breakpoints when this query is issued,
         * because gdb is doing an initial connect and the state
         * should be cleaned up.
         */
2108 gdb_breakpoint_remove_all();
2109 break;
2110 case 'c':
2111 if (*p != '\0') {
2112 addr = strtoull(p, (char **)&p, 16);
2113 gdb_set_cpu_pc(s, addr);
2115 s->signal = 0;
2116 gdb_continue(s);
2117 return RS_IDLE;
2118 case 'C':
2119 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2120 if (s->signal == -1)
2121 s->signal = 0;
2122 gdb_continue(s);
2123 return RS_IDLE;
2124 case 'v':
2125 if (strncmp(p, "Cont", 4) == 0) {
2126 int res_signal, res_thread;
2128 p += 4;
2129 if (*p == '?') {
2130 put_packet(s, "vCont;c;C;s;S");
2131 break;
2133 res = 0;
2134 res_signal = 0;
2135 res_thread = 0;
2136 while (*p) {
2137 int action, signal;
2139 if (*p++ != ';') {
2140 res = 0;
2141 break;
2143 action = *p++;
2144 signal = 0;
2145 if (action == 'C' || action == 'S') {
2146 signal = strtoul(p, (char **)&p, 16);
2147 } else if (action != 'c' && action != 's') {
2148 res = 0;
2149 break;
2151 thread = 0;
2152 if (*p == ':') {
2153 thread = strtoull(p+1, (char **)&p, 16);
2155 action = tolower(action);
2156 if (res == 0 || (res == 'c' && action == 's')) {
2157 res = action;
2158 res_signal = signal;
2159 res_thread = thread;
2162 if (res) {
2163 if (res_thread != -1 && res_thread != 0) {
2164 env = find_cpu(res_thread);
2165 if (env == NULL) {
2166 put_packet(s, "E22");
2167 break;
2169 s->c_cpu = env;
2171 if (res == 's') {
2172 cpu_single_step(s->c_cpu, sstep_flags);
2174 s->signal = res_signal;
2175 gdb_continue(s);
2176 return RS_IDLE;
2178 break;
2179 } else {
2180 goto unknown_command;
2182 case 'k':
2183 #ifdef CONFIG_USER_ONLY
2184 /* Kill the target */
2185 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2186 exit(0);
2187 #endif
2188 case 'D':
2189 /* Detach packet */
2190 gdb_breakpoint_remove_all();
2191 gdb_syscall_mode = GDB_SYS_DISABLED;
2192 gdb_continue(s);
2193 put_packet(s, "OK");
2194 break;
2195 case 's':
2196 if (*p != '\0') {
2197 addr = strtoull(p, (char **)&p, 16);
2198 gdb_set_cpu_pc(s, addr);
2200 cpu_single_step(s->c_cpu, sstep_flags);
2201 gdb_continue(s);
2202 return RS_IDLE;
2203 case 'F':
2205 target_ulong ret;
2206 target_ulong err;
2208 ret = strtoull(p, (char **)&p, 16);
2209 if (*p == ',') {
2210 p++;
2211 err = strtoull(p, (char **)&p, 16);
2212 } else {
2213 err = 0;
2215 if (*p == ',')
2216 p++;
2217 type = *p;
2218 if (s->current_syscall_cb) {
2219 s->current_syscall_cb(s->c_cpu, ret, err);
2220 s->current_syscall_cb = NULL;
2222 if (type == 'C') {
2223 put_packet(s, "T02");
2224 } else {
2225 gdb_continue(s);
2228 break;
2229 case 'g':
2230 cpu_synchronize_state(s->g_cpu);
2231 env = s->g_cpu;
2232 len = 0;
2233 for (addr = 0; addr < num_g_regs; addr++) {
2234 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2235 len += reg_size;
2237 memtohex(buf, mem_buf, len);
2238 put_packet(s, buf);
2239 break;
2240 case 'G':
2241 cpu_synchronize_state(s->g_cpu);
2242 env = s->g_cpu;
2243 registers = mem_buf;
2244 len = strlen(p) / 2;
2245 hextomem((uint8_t *)registers, p, len);
2246 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2247 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2248 len -= reg_size;
2249 registers += reg_size;
2251 put_packet(s, "OK");
2252 break;
2253 case 'm':
2254 addr = strtoull(p, (char **)&p, 16);
2255 if (*p == ',')
2256 p++;
2257 len = strtoull(p, NULL, 16);
2258 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2259 put_packet (s, "E14");
2260 } else {
2261 memtohex(buf, mem_buf, len);
2262 put_packet(s, buf);
2264 break;
2265 case 'M':
2266 addr = strtoull(p, (char **)&p, 16);
2267 if (*p == ',')
2268 p++;
2269 len = strtoull(p, (char **)&p, 16);
2270 if (*p == ':')
2271 p++;
2272 hextomem(mem_buf, p, len);
2273 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2274 put_packet(s, "E14");
2275 } else {
2276 put_packet(s, "OK");
2278 break;
2279 case 'p':
        /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
           This works, but can be very slow.  Anything new enough to
           understand XML also knows how to use this properly.  */
2283 if (!gdb_has_xml)
2284 goto unknown_command;
2285 addr = strtoull(p, (char **)&p, 16);
2286 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2287 if (reg_size) {
2288 memtohex(buf, mem_buf, reg_size);
2289 put_packet(s, buf);
2290 } else {
2291 put_packet(s, "E14");
2293 break;
2294 case 'P':
2295 if (!gdb_has_xml)
2296 goto unknown_command;
2297 addr = strtoull(p, (char **)&p, 16);
2298 if (*p == '=')
2299 p++;
2300 reg_size = strlen(p) / 2;
2301 hextomem(mem_buf, p, reg_size);
2302 gdb_write_register(s->g_cpu, mem_buf, addr);
2303 put_packet(s, "OK");
2304 break;
2305 case 'Z':
2306 case 'z':
2307 type = strtoul(p, (char **)&p, 16);
2308 if (*p == ',')
2309 p++;
2310 addr = strtoull(p, (char **)&p, 16);
2311 if (*p == ',')
2312 p++;
2313 len = strtoull(p, (char **)&p, 16);
2314 if (ch == 'Z')
2315 res = gdb_breakpoint_insert(addr, len, type);
2316 else
2317 res = gdb_breakpoint_remove(addr, len, type);
2318 if (res >= 0)
2319 put_packet(s, "OK");
2320 else if (res == -ENOSYS)
2321 put_packet(s, "");
2322 else
2323 put_packet(s, "E22");
2324 break;
2325 case 'H':
2326 type = *p++;
2327 thread = strtoull(p, (char **)&p, 16);
2328 if (thread == -1 || thread == 0) {
2329 put_packet(s, "OK");
2330 break;
2332 env = find_cpu(thread);
2333 if (env == NULL) {
2334 put_packet(s, "E22");
2335 break;
2337 switch (type) {
2338 case 'c':
2339 s->c_cpu = env;
2340 put_packet(s, "OK");
2341 break;
2342 case 'g':
2343 s->g_cpu = env;
2344 put_packet(s, "OK");
2345 break;
2346 default:
2347 put_packet(s, "E22");
2348 break;
2350 break;
2351 case 'T':
2352 thread = strtoull(p, (char **)&p, 16);
2353 env = find_cpu(thread);
2355 if (env != NULL) {
2356 put_packet(s, "OK");
2357 } else {
2358 put_packet(s, "E22");
2360 break;
2361 case 'q':
2362 case 'Q':
2363 /* parse any 'q' packets here */
2364 if (!strcmp(p,"qemu.sstepbits")) {
2365 /* Query Breakpoint bit definitions */
2366 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2367 SSTEP_ENABLE,
2368 SSTEP_NOIRQ,
2369 SSTEP_NOTIMER);
2370 put_packet(s, buf);
2371 break;
2372 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2373 /* Display or change the sstep_flags */
2374 p += 10;
2375 if (*p != '=') {
2376 /* Display current setting */
2377 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2378 put_packet(s, buf);
2379 break;
2381 p++;
2382 type = strtoul(p, (char **)&p, 16);
2383 sstep_flags = type;
2384 put_packet(s, "OK");
2385 break;
2386 } else if (strcmp(p,"C") == 0) {
2387 /* "Current thread" remains vague in the spec, so always return
2388 * the first CPU (gdb returns the first thread). */
2389 put_packet(s, "QC1");
2390 break;
2391 } else if (strcmp(p,"fThreadInfo") == 0) {
2392 s->query_cpu = first_cpu;
2393 goto report_cpuinfo;
2394 } else if (strcmp(p,"sThreadInfo") == 0) {
2395 report_cpuinfo:
2396 if (s->query_cpu) {
2397 snprintf(buf, sizeof(buf), "m%x",
2398 cpu_index(ENV_GET_CPU(s->query_cpu)));
2399 put_packet(s, buf);
2400 s->query_cpu = s->query_cpu->next_cpu;
2401 } else
2402 put_packet(s, "l");
2403 break;
2404 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2405 thread = strtoull(p+16, (char **)&p, 16);
2406 env = find_cpu(thread);
2407 if (env != NULL) {
2408 CPUState *cpu = ENV_GET_CPU(env);
2409 cpu_synchronize_state(env);
2410 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2411 "CPU#%d [%s]", cpu->cpu_index,
2412 cpu->halted ? "halted " : "running");
2413 memtohex(buf, mem_buf, len);
2414 put_packet(s, buf);
2416 break;
2418 #ifdef CONFIG_USER_ONLY
2419 else if (strncmp(p, "Offsets", 7) == 0) {
2420 TaskState *ts = s->c_cpu->opaque;
2422 snprintf(buf, sizeof(buf),
2423 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2424 ";Bss=" TARGET_ABI_FMT_lx,
2425 ts->info->code_offset,
2426 ts->info->data_offset,
2427 ts->info->data_offset);
2428 put_packet(s, buf);
2429 break;
2431 #else /* !CONFIG_USER_ONLY */
2432 else if (strncmp(p, "Rcmd,", 5) == 0) {
2433 int len = strlen(p + 5);
2435 if ((len % 2) != 0) {
2436 put_packet(s, "E01");
2437 break;
2439 hextomem(mem_buf, p + 5, len);
2440 len = len / 2;
2441 mem_buf[len++] = 0;
2442 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2443 put_packet(s, "OK");
2444 break;
2446 #endif /* !CONFIG_USER_ONLY */
2447 if (strncmp(p, "Supported", 9) == 0) {
2448 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2449 #ifdef GDB_CORE_XML
2450 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2451 #endif
2452 put_packet(s, buf);
2453 break;
2455 #ifdef GDB_CORE_XML
2456 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2457 const char *xml;
2458 target_ulong total_len;
2460 gdb_has_xml = 1;
2461 p += 19;
2462 xml = get_feature_xml(p, &p);
2463 if (!xml) {
2464 snprintf(buf, sizeof(buf), "E00");
2465 put_packet(s, buf);
2466 break;
2469 if (*p == ':')
2470 p++;
2471 addr = strtoul(p, (char **)&p, 16);
2472 if (*p == ',')
2473 p++;
2474 len = strtoul(p, (char **)&p, 16);
2476 total_len = strlen(xml);
2477 if (addr > total_len) {
2478 snprintf(buf, sizeof(buf), "E00");
2479 put_packet(s, buf);
2480 break;
2482 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2483 len = (MAX_PACKET_LENGTH - 5) / 2;
2484 if (len < total_len - addr) {
2485 buf[0] = 'm';
2486 len = memtox(buf + 1, xml + addr, len);
2487 } else {
2488 buf[0] = 'l';
2489 len = memtox(buf + 1, xml + addr, total_len - addr);
2491 put_packet_binary(s, buf, len + 1);
2492 break;
2494 #endif
2495 /* Unrecognised 'q' command. */
2496 goto unknown_command;
2498 default:
2499 unknown_command:
2500 /* Reply with an empty packet for unrecognised commands. */
2501 buf[0] = '\0';
2502 put_packet(s, buf);
2503 break;
2505 return RS_IDLE;
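/* Illustrative request/reply pairs for the query packets handled above.
 * The concrete values are examples, assuming the usual SSTEP_* definitions
 * of 0x1/0x2/0x4, a single CPU and GDB_CORE_XML being defined:
 *
 *   qemu.sstepbits   -> "ENABLE=1,NOIRQ=2,NOTIMER=4"   (flag definitions)
 *   qemu.sstep       -> "0x7"                          (current sstep_flags)
 *   qemu.sstep=0x1   -> "OK"                           (set new flags)
 *   qC               -> "QC1"
 *   qfThreadInfo     -> "m1", qsThreadInfo -> "m2", ... then "l" when done
 *   qRcmd,<hex cmd>  -> monitor output as 'O' packets, then "OK"
 *   qSupported       -> "PacketSize=1000;qXfer:features:read+"
 */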
2508 void gdb_set_stop_cpu(CPUArchState *env)
2510 gdbserver_state->c_cpu = env;
2511 gdbserver_state->g_cpu = env;
2514 #ifndef CONFIG_USER_ONLY
2515 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2517 GDBState *s = gdbserver_state;
2518 CPUArchState *env = s->c_cpu;
2519 CPUState *cpu = ENV_GET_CPU(env);
2520 char buf[256];
2521 const char *type;
2522 int ret;
2524 if (running || s->state == RS_INACTIVE) {
2525 return;
2527 /* Is there a GDB syscall waiting to be sent? */
2528 if (s->current_syscall_cb) {
2529 put_packet(s, s->syscall_buf);
2530 return;
2532 switch (state) {
2533 case RUN_STATE_DEBUG:
2534 if (env->watchpoint_hit) {
2535 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2536 case BP_MEM_READ:
2537 type = "r";
2538 break;
2539 case BP_MEM_ACCESS:
2540 type = "a";
2541 break;
2542 default:
2543 type = "";
2544 break;
2546 snprintf(buf, sizeof(buf),
2547 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2548 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2549 env->watchpoint_hit->vaddr);
2550 env->watchpoint_hit = NULL;
2551 goto send_packet;
2553 tb_flush(env);
2554 ret = GDB_SIGNAL_TRAP;
2555 break;
2556 case RUN_STATE_PAUSED:
2557 ret = GDB_SIGNAL_INT;
2558 break;
2559 case RUN_STATE_SHUTDOWN:
2560 ret = GDB_SIGNAL_QUIT;
2561 break;
2562 case RUN_STATE_IO_ERROR:
2563 ret = GDB_SIGNAL_IO;
2564 break;
2565 case RUN_STATE_WATCHDOG:
2566 ret = GDB_SIGNAL_ALRM;
2567 break;
2568 case RUN_STATE_INTERNAL_ERROR:
2569 ret = GDB_SIGNAL_ABRT;
2570 break;
2571 case RUN_STATE_SAVE_VM:
2572 case RUN_STATE_RESTORE_VM:
2573 return;
2574 case RUN_STATE_FINISH_MIGRATE:
2575 ret = GDB_SIGNAL_XCPU;
2576 break;
2577 default:
2578 ret = GDB_SIGNAL_UNKNOWN;
2579 break;
2581 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2583 send_packet:
2584 put_packet(s, buf);
2586 /* disable single step if it was enabled */
2587 cpu_single_step(env, 0);
2589 #endif
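/* For reference, the stop replies built above look like this (thread ids
 * and addresses are illustrative):
 *
 *   "T05thread:01;"                    plain stop, GDB_SIGNAL_TRAP (5)
 *   "T05thread:01;rwatch:0000a000;"    read watchpoint hit at 0xa000
 *   "T02thread:01;"                    RUN_STATE_PAUSED -> GDB_SIGNAL_INT
 *
 * The two hex digits after 'T' are the GDB signal number; the "watch"
 * keyword is prefixed with "r" for read and "a" for access watchpoints,
 * while plain write watchpoints use "watch" unprefixed.
 */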
2591 /* Send a gdb syscall request.
2592 This accepts limited printf-style format specifiers, specifically:
2593 %x - target_ulong argument printed in hex.
2594 %lx - 64-bit argument printed in hex.
2595 %s - string pointer (target_ulong) and length (int) pair. */
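/* Example (illustrative only; the callback and argument names below are
   placeholders, not identifiers from this file):

       gdb_do_syscall(cb, "write,%x,%x,%x", fd, buf_addr, buf_len);

   emits an "Fwrite,<fd hex>,<addr hex>,<len hex>" packet; gdb performs the
   file I/O on its side and the eventual 'F' reply is handed back to the
   registered callback. */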
2596 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2598 va_list va;
2599 char *p;
2600 char *p_end;
2601 target_ulong addr;
2602 uint64_t i64;
2603 GDBState *s;
2605 s = gdbserver_state;
2606 if (!s)
2607 return;
2608 s->current_syscall_cb = cb;
2609 #ifndef CONFIG_USER_ONLY
2610 vm_stop(RUN_STATE_DEBUG);
2611 #endif
2612 va_start(va, fmt);
2613 p = s->syscall_buf;
2614 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2615 *(p++) = 'F';
2616 while (*fmt) {
2617 if (*fmt == '%') {
2618 fmt++;
2619 switch (*fmt++) {
2620 case 'x':
2621 addr = va_arg(va, target_ulong);
2622 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2623 break;
2624 case 'l':
2625 if (*(fmt++) != 'x')
2626 goto bad_format;
2627 i64 = va_arg(va, uint64_t);
2628 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2629 break;
2630 case 's':
2631 addr = va_arg(va, target_ulong);
2632 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2633 addr, va_arg(va, int));
2634 break;
2635 default:
2636 bad_format:
2637 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2638 fmt - 1);
2639 break;
2641 } else {
2642 *(p++) = *(fmt++);
2645 *p = 0;
2646 va_end(va);
2647 #ifdef CONFIG_USER_ONLY
2648 put_packet(s, s->syscall_buf);
2649 gdb_handlesig(s->c_cpu, 0);
2650 #else
2651 /* In this case, defer sending the syscall packet until we are notified
2652    that the CPU has stopped.  If the packet were sent immediately, the
2653    reply to the syscall request could arrive while the CPU is still in
2654    the running state, which can cause packets to be dropped and spurious
2655    'T' stop-reply packets to be sent while the syscall is still being
2656    processed. */
2657 cpu_exit(s->c_cpu);
2658 #endif
2661 static void gdb_read_byte(GDBState *s, int ch)
2663 int i, csum;
2664 uint8_t reply;
2666 #ifndef CONFIG_USER_ONLY
2667 if (s->last_packet_len) {
2668 /* Waiting for a response to the last packet. If we see the start
2669 of a new command then abandon the previous response. */
2670 if (ch == '-') {
2671 #ifdef DEBUG_GDB
2672 printf("Got NACK, retransmitting\n");
2673 #endif
2674 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2676 #ifdef DEBUG_GDB
2677 else if (ch == '+')
2678 printf("Got ACK\n");
2679 else
2680 printf("Got '%c' when expecting ACK/NACK\n", ch);
2681 #endif
2682 if (ch == '+' || ch == '$')
2683 s->last_packet_len = 0;
2684 if (ch != '$')
2685 return;
2687 if (runstate_is_running()) {
2688 /* While the CPU is running we cannot do anything except stop it,
2689 which we do as soon as any character arrives */
2690 vm_stop(RUN_STATE_PAUSED);
2691 } else
2692 #endif
2694 switch(s->state) {
2695 case RS_IDLE:
2696 if (ch == '$') {
2697 s->line_buf_index = 0;
2698 s->state = RS_GETLINE;
2700 break;
2701 case RS_GETLINE:
2702 if (ch == '#') {
2703 s->state = RS_CHKSUM1;
2704 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2705 s->state = RS_IDLE;
2706 } else {
2707 s->line_buf[s->line_buf_index++] = ch;
2709 break;
2710 case RS_CHKSUM1:
2711 s->line_buf[s->line_buf_index] = '\0';
2712 s->line_csum = fromhex(ch) << 4;
2713 s->state = RS_CHKSUM2;
2714 break;
2715 case RS_CHKSUM2:
2716 s->line_csum |= fromhex(ch);
2717 csum = 0;
2718 for(i = 0; i < s->line_buf_index; i++) {
2719 csum += s->line_buf[i];
2721 if (s->line_csum != (csum & 0xff)) {
2722 reply = '-';
2723 put_buffer(s, &reply, 1);
2724 s->state = RS_IDLE;
2725 } else {
2726 reply = '+';
2727 put_buffer(s, &reply, 1);
2728 s->state = gdb_handle_packet(s, s->line_buf);
2730 break;
2731 default:
2732 abort();
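/* Framing recap for the state machine above: a remote-protocol packet is
 * transmitted as
 *
 *     $<payload>#<checksum>
 *
 * where <checksum> is two hex digits giving the modulo-256 sum of the
 * payload bytes; e.g. the register-read request "g" goes out on the wire
 * as "$g#67" (ASCII 'g' == 0x67).  A packet with a good checksum is
 * acknowledged with '+', a mismatch with '-', which makes the sender
 * retransmit.
 */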
2737 /* Tell the remote gdb that the process has exited. */
2738 void gdb_exit(CPUArchState *env, int code)
2740 GDBState *s;
2741 char buf[4];
2743 s = gdbserver_state;
2744 if (!s) {
2745 return;
2747 #ifdef CONFIG_USER_ONLY
2748 if (gdbserver_fd < 0 || s->fd < 0) {
2749 return;
2751 #endif
2753 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2754 put_packet(s, buf);
2756 #ifndef CONFIG_USER_ONLY
2757 if (s->chr) {
2758 qemu_chr_delete(s->chr);
2760 #endif
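/* For example, a guest exit status of 1 is reported to the debugger as the
 * packet "W01"; only the low 8 bits of the exit code are transmitted. */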
2763 #ifdef CONFIG_USER_ONLY
2764 int
2765 gdb_queuesig (void)
2767 GDBState *s;
2769 s = gdbserver_state;
2771 if (gdbserver_fd < 0 || s->fd < 0)
2772 return 0;
2773 else
2774 return 1;
2777 int
2778 gdb_handlesig (CPUArchState *env, int sig)
2780 GDBState *s;
2781 char buf[256];
2782 int n;
2784 s = gdbserver_state;
2785 if (gdbserver_fd < 0 || s->fd < 0)
2786 return sig;
2788 /* disable single step if it was enabled */
2789 cpu_single_step(env, 0);
2790 tb_flush(env);
2792 if (sig != 0)
2794 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2795 put_packet(s, buf);
2797 /* put_packet() might have detected that the peer terminated the
2798 connection. */
2799 if (s->fd < 0)
2800 return sig;
2802 sig = 0;
2803 s->state = RS_IDLE;
2804 s->running_state = 0;
2805 while (s->running_state == 0) {
2806 n = read (s->fd, buf, 256);
2807 if (n > 0)
2809 int i;
2811 for (i = 0; i < n; i++)
2812 gdb_read_byte (s, buf[i]);
2814 else if (n == 0 || errno != EAGAIN)
2816 /* XXX: Connection closed. Should probably wait for another
2817 connection before continuing. */
2818 return sig;
2821 sig = s->signal;
2822 s->signal = 0;
2823 return sig;
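/* A minimal sketch of the intended caller of gdb_handlesig(); the
 * queue_signal() call and the surrounding signal-delivery loop are
 * assumptions about the user-mode caller, not code in this file:
 *
 *     sig = gdb_handlesig(env, sig);      // give gdb a chance to intercept
 *     if (sig) {
 *         queue_signal(env, sig, &info);  // deliver whatever gdb left us
 *     }
 */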
2826 /* Tell the remote gdb that the process has exited due to SIG. */
2827 void gdb_signalled(CPUArchState *env, int sig)
2829 GDBState *s;
2830 char buf[4];
2832 s = gdbserver_state;
2833 if (gdbserver_fd < 0 || s->fd < 0)
2834 return;
2836 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2837 put_packet(s, buf);
2840 static void gdb_accept(void)
2842 GDBState *s;
2843 struct sockaddr_in sockaddr;
2844 socklen_t len;
2845 int fd;
2847 for(;;) {
2848 len = sizeof(sockaddr);
2849 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2850 if (fd < 0 && errno != EINTR) {
2851 perror("accept");
2852 return;
2853 } else if (fd >= 0) {
2854 #ifndef _WIN32
2855 fcntl(fd, F_SETFD, FD_CLOEXEC);
2856 #endif
2857 break;
2861 /* set short latency */
2862 socket_set_nodelay(fd);
2864 s = g_malloc0(sizeof(GDBState));
2865 s->c_cpu = first_cpu;
2866 s->g_cpu = first_cpu;
2867 s->fd = fd;
2868 gdb_has_xml = 0;
2870 gdbserver_state = s;
2872 fcntl(fd, F_SETFL, O_NONBLOCK);
2875 static int gdbserver_open(int port)
2877 struct sockaddr_in sockaddr;
2878 int fd, val, ret;
2880 fd = socket(PF_INET, SOCK_STREAM, 0);
2881 if (fd < 0) {
2882 perror("socket");
2883 return -1;
2885 #ifndef _WIN32
2886 fcntl(fd, F_SETFD, FD_CLOEXEC);
2887 #endif
2889 /* allow fast reuse */
2890 val = 1;
2891 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2893 sockaddr.sin_family = AF_INET;
2894 sockaddr.sin_port = htons(port);
2895 sockaddr.sin_addr.s_addr = 0;
2896 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2897 if (ret < 0) {
2898 perror("bind");
2899 close(fd);
2900 return -1;
2902 ret = listen(fd, 0);
2903 if (ret < 0) {
2904 perror("listen");
2905 close(fd);
2906 return -1;
2908 return fd;
2911 int gdbserver_start(int port)
2913 gdbserver_fd = gdbserver_open(port);
2914 if (gdbserver_fd < 0)
2915 return -1;
2916 /* accept connections */
2917 gdb_accept();
2918 return 0;
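/* Typical use of the user-mode stub above (binary name and port are
 * illustrative):
 *
 *     qemu-arm -g 1234 ./a.out        # wait for gdb on TCP port 1234
 *     (gdb) target remote :1234       # then connect from gdb
 *
 * gdb_accept() blocks until the debugger connects, so the guest does not
 * start running before gdb is attached.
 */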
2921 /* Disable gdb stub for child processes. */
2922 void gdbserver_fork(CPUArchState *env)
2924 GDBState *s = gdbserver_state;
2925 if (gdbserver_fd < 0 || s->fd < 0)
2926 return;
2927 close(s->fd);
2928 s->fd = -1;
2929 cpu_breakpoint_remove_all(env, BP_GDB);
2930 cpu_watchpoint_remove_all(env, BP_GDB);
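/* gdbserver_fork() is expected to run in the child after fork() in
 * user-mode emulation (the call site lives outside this file): the child
 * drops its copy of the connection and removes the gdb-inserted
 * breakpoints and watchpoints, so only the parent stays under debugger
 * control. */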
2932 #else
2933 static int gdb_chr_can_receive(void *opaque)
2935 /* We can handle an arbitrarily large amount of data.
2936 Pick the maximum packet size, which is as good a limit as any. */
2937 return MAX_PACKET_LENGTH;
2940 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2942 int i;
2944 for (i = 0; i < size; i++) {
2945 gdb_read_byte(gdbserver_state, buf[i]);
2949 static void gdb_chr_event(void *opaque, int event)
2951 switch (event) {
2952 case CHR_EVENT_OPENED:
2953 vm_stop(RUN_STATE_PAUSED);
2954 gdb_has_xml = 0;
2955 break;
2956 default:
2957 break;
2961 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2963 char buf[MAX_PACKET_LENGTH];
2965 buf[0] = 'O';
2966 if (len > (MAX_PACKET_LENGTH/2) - 1)
2967 len = (MAX_PACKET_LENGTH/2) - 1;
2968 memtohex(buf + 1, (uint8_t *)msg, len);
2969 put_packet(s, buf);
2972 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2974 const char *p = (const char *)buf;
2975 int max_sz;
2977 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2978 for (;;) {
2979 if (len <= max_sz) {
2980 gdb_monitor_output(gdbserver_state, p, len);
2981 break;
2983 gdb_monitor_output(gdbserver_state, p, max_sz);
2984 p += max_sz;
2985 len -= max_sz;
2987 return len;
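/* Round trip for "monitor" commands, tying the qRcmd handling in
 * gdb_handle_packet() to the two helpers above:
 *
 *   (gdb) monitor info version
 *     -> packet "qRcmd,696e666f2076657273696f6e"   (hex of the command)
 *     -> the decoded command is fed to the monitor via qemu_chr_be_write()
 *     -> monitor output comes back through gdb_monitor_write(), which
 *        chunks it into "O<hex>" console-output packets, then "OK" is sent.
 */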
2990 #ifndef _WIN32
2991 static void gdb_sigterm_handler(int signal)
2993 if (runstate_is_running()) {
2994 vm_stop(RUN_STATE_PAUSED);
2997 #endif
2999 int gdbserver_start(const char *device)
3001 GDBState *s;
3002 char gdbstub_device_name[128];
3003 CharDriverState *chr = NULL;
3004 CharDriverState *mon_chr;
3006 if (!device)
3007 return -1;
3008 if (strcmp(device, "none") != 0) {
3009 if (strstart(device, "tcp:", NULL)) {
3010 /* enforce required TCP attributes */
3011 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3012 "%s,nowait,nodelay,server", device);
3013 device = gdbstub_device_name;
3015 #ifndef _WIN32
3016 else if (strcmp(device, "stdio") == 0) {
3017 struct sigaction act;
3019 memset(&act, 0, sizeof(act));
3020 act.sa_handler = gdb_sigterm_handler;
3021 sigaction(SIGINT, &act, NULL);
3023 #endif
3024 chr = qemu_chr_new("gdb", device, NULL);
3025 if (!chr)
3026 return -1;
3028 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3029 gdb_chr_event, NULL);
3032 s = gdbserver_state;
3033 if (!s) {
3034 s = g_malloc0(sizeof(GDBState));
3035 gdbserver_state = s;
3037 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3039 /* Initialize a monitor terminal for gdb */
3040 mon_chr = g_malloc0(sizeof(*mon_chr));
3041 mon_chr->chr_write = gdb_monitor_write;
3042 monitor_init(mon_chr, 0);
3043 } else {
3044 if (s->chr)
3045 qemu_chr_delete(s->chr);
3046 mon_chr = s->mon_chr;
3047 memset(s, 0, sizeof(GDBState));
3049 s->c_cpu = first_cpu;
3050 s->g_cpu = first_cpu;
3051 s->chr = chr;
3052 s->state = chr ? RS_IDLE : RS_INACTIVE;
3053 s->mon_chr = mon_chr;
3054 s->current_syscall_cb = NULL;
3056 return 0;
3058 #endif
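/* Typical system-emulation usage of the gdbserver_start() variant above
 * (command lines illustrative):
 *
 *     qemu-system-arm -s ...               # shorthand for "-gdb tcp::1234"
 *     qemu-system-arm -gdb tcp::9000 ...   # any character device spec works
 *
 * then, from gdb: "target remote localhost:1234".  For "tcp:" devices the
 * code above appends ",nowait,nodelay,server", so QEMU listens for the
 * debugger without blocking startup.
 */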