/*
 * x86 gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
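
/*
 * gdb numbers the general registers in its own order (rax, rbx, rcx, rdx,
 * rsi, rdi, rbp, rsp, then r8..r15), which is not the layout of env->regs,
 * so gpr_map translates a gdb register number into an env->regs index.
 * For 32-bit targets the two orderings already agree, hence the identity
 * gpr_map32.
 */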
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/*
 * Keep these in sync with assignment to
 * gdb_num_core_regs in target/i386/cpu.c
 * and with the machine description
 */

/*
 * SEG: 6 segments, plus fs_base, gs_base, kernel_gs_base
 */

/*
 * general regs ----->  8 or 16
 */
#define IDX_NB_IP       1
#define IDX_NB_FLAGS    1
#define IDX_NB_SEG      (6 + 3)
#define IDX_NB_CTL      6
#define IDX_NB_FP       16
/*
 * fpu regs ----------> 8 or 16
 */
#define IDX_NB_MXCSR    1
/*
 *          total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
 */

#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + IDX_NB_IP)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + IDX_NB_FLAGS)
#define IDX_CTL_REGS    (IDX_SEG_REGS + IDX_NB_SEG)
#define IDX_FP_REGS     (IDX_CTL_REGS + IDX_NB_CTL)
#define IDX_XMM_REGS    (IDX_FP_REGS + IDX_NB_FP)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

#define IDX_CTL_CR0_REG     (IDX_CTL_REGS + 0)
#define IDX_CTL_CR2_REG     (IDX_CTL_REGS + 1)
#define IDX_CTL_CR3_REG     (IDX_CTL_REGS + 2)
#define IDX_CTL_CR4_REG     (IDX_CTL_REGS + 3)
#define IDX_CTL_CR8_REG     (IDX_CTL_REGS + 4)
#define IDX_CTL_EFER_REG    (IDX_CTL_REGS + 5)

#ifdef TARGET_X86_64
#define GDB_FORCE_64 1
#else
#define GDB_FORCE_64 0
#endif
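
/*
 * Read gdb register number n into mem_buf, appending the value in the byte
 * order gdb expects.  The return value is the number of bytes appended, or
 * 0 for a register this stub does not provide.
 */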
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    uint64_t tpr;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
            } else if (n < CPU_NB_REGS32) {
                return gdb_get_reg64(mem_buf,
                                     env->regs[gpr_map[n]] & 0xffffffffUL);
            } else {
                return gdb_get_regl(mem_buf, 0);
            }
        } else {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
        int len = gdb_get_reg64(mem_buf, cpu_to_le64(fp->low));
        len += gdb_get_reg16(mem_buf, cpu_to_le16(fp->high));
        return len;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            return gdb_get_reg128(mem_buf,
                                  env->xmm_regs[n].ZMM_Q(0),
                                  env->xmm_regs[n].ZMM_Q(1));
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    return gdb_get_reg64(mem_buf, env->eip);
                } else {
                    return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
                }
            } else {
                return gdb_get_reg32(mem_buf, env->eip);
            }
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);

        case IDX_SEG_REGS + 6:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_FS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_FS].base);

        case IDX_SEG_REGS + 7:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_GS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_GS].base);

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->kernelgsbase);
            }
            return gdb_get_reg32(mem_buf, env->kernelgsbase);
#endif
            return gdb_get_reg32(mem_buf, 0);
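
        /*
         * x87 control and status registers.  QEMU keeps the top-of-stack
         * pointer separately in fpstt, so the status word reported to gdb
         * re-inserts it into bits 11..13; the remaining fields (ftag,
         * fiseg, fioff, foseg, fooff, fop) are simply reported as 0 here.
         */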
        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            update_mxcsr_from_sse_status(env);
            return gdb_get_reg32(mem_buf, env->mxcsr);
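
        /*
         * Control registers.  Their width follows the same rule as the
         * general registers: 64-bit whenever the guest is running 64-bit
         * code (CS.L set) or GDB_FORCE_64 is set for a 64-bit build,
         * 32-bit otherwise.
         */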
        case IDX_CTL_CR0_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[0]);
            }
            return gdb_get_reg32(mem_buf, env->cr[0]);

        case IDX_CTL_CR2_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[2]);
            }
            return gdb_get_reg32(mem_buf, env->cr[2]);

        case IDX_CTL_CR3_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[3]);
            }
            return gdb_get_reg32(mem_buf, env->cr[3]);

        case IDX_CTL_CR4_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[4]);
            }
            return gdb_get_reg32(mem_buf, env->cr[4]);
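
        /*
         * CR8 mirrors the local APIC task priority, so read it from the
         * APIC on system-emulation builds; without an APIC (user-mode
         * emulation) it reads as 0.
         */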
        case IDX_CTL_CR8_REG:
#ifdef CONFIG_SOFTMMU
            tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
            tpr = 0;
#endif
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, tpr);
            }
            return gdb_get_reg32(mem_buf, tpr);

        case IDX_CTL_EFER_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->efer);
            }
            return gdb_get_reg32(mem_buf, env->efer);
        }
    }
    return 0;
}
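
/*
 * Helper for segment-selector writes: install the new selector and refresh
 * the hidden part of the segment register (base, limit, flags) so that
 * subsequent accesses use consistent state.  Always consumes the 4 bytes
 * gdb sends for a selector.
 */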
static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
{
    CPUX86State *env = &cpu->env;
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;
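
        /*
         * In real mode or vm86 mode the descriptor is implied by the
         * selector: base = selector << 4 with a 64 KiB writable data
         * segment.  In protected mode, look the descriptor up in the
         * guest's GDT/LDT instead.
         */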
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
            base = selector << 4;
            limit = 0xffff;
            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
        } else {
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
                                         &flags)) {
                return 4;
            }
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}
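
/*
 * Write gdb register number n from mem_buf back into the CPU state and
 * return the number of bytes consumed, or 0 for an unrecognised register.
 */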
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t tmp;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            } else if (n < CPU_NB_REGS32) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
            }
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
        fp->low = le64_to_cpu(* (uint64_t *) mem_buf);
        fp->high = le16_to_cpu(* (uint16_t *) (mem_buf + 8));
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    env->eip = ldq_p(mem_buf);
                } else {
                    env->eip = ldq_p(mem_buf) & 0xffffffffUL;
                }
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;
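
        /*
         * Segment selector writes go through x86_cpu_gdb_load_seg() so that
         * the hidden descriptor cache is reloaded to match the new selector.
         */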
        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);

        case IDX_SEG_REGS + 6:
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_FS].base = ldq_p(mem_buf);
                return 8;
            }
            env->segs[R_FS].base = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS + 7:
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_GS].base = ldq_p(mem_buf);
                return 8;
            }
            env->segs[R_GS].base = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            if (env->hflags & HF_CS64_MASK) {
                env->kernelgsbase = ldq_p(mem_buf);
                return 8;
            }
            env->kernelgsbase = ldl_p(mem_buf);
#endif
            return 4;

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */
            return 4;
        case IDX_FP_REGS + 11: /* fiseg */
            return 4;
        case IDX_FP_REGS + 12: /* fioff */
            return 4;
        case IDX_FP_REGS + 13: /* foseg */
            return 4;
        case IDX_FP_REGS + 14: /* fooff */
            return 4;
        case IDX_FP_REGS + 15: /* fop */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;
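
        /*
         * Control register writes go through the cpu_x86_update_cr*()
         * helpers (and cpu_load_efer() for EFER) so that dependent state
         * such as cached hflags and the soft TLB stays consistent with the
         * new value; CR2 has no side effects and is stored directly.
         */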
        case IDX_CTL_CR0_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr0(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr0(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR2_REG:
            if (env->hflags & HF_CS64_MASK) {
                env->cr[2] = ldq_p(mem_buf);
                return 8;
            }
            env->cr[2] = ldl_p(mem_buf);
            return 4;

        case IDX_CTL_CR3_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr3(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr3(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR4_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr4(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr4(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR8_REG:
            if (env->hflags & HF_CS64_MASK) {
#ifdef CONFIG_SOFTMMU
                cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf));
#endif
                return 8;
            }
#ifdef CONFIG_SOFTMMU
            cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf));
#endif
            return 4;

        case IDX_CTL_EFER_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_load_efer(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_load_efer(env, ldl_p(mem_buf));
            return 4;
        }
    }
    /* Unrecognised register.  */
    return 0;
}