/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "qapi/qmp/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"

/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}

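/*
 * For example (illustrative): with 4-level paging (CR4.LA57 clear), an
 * address with bit 47 set such as 0x0000800000000000 is sign-extended by
 * addr_canonical() to its canonical form 0xffff800000000000.
 */
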
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, HWADDR_FMT_plx ": " HWADDR_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

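/*
 * Each line printed by print_pte() has the form
 *     <virtual address>: <physical address> <flags>
 * where the flag letters X G P D A C T U W stand for the NX, global, PSE,
 * dirty, accessed, cache-disable, write-through, user and writable bits,
 * and '-' marks a clear bit.
 */
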
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                  (l2 << 30) + (l3 << 21) + (l4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

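/*
 * Handler for the HMP command "info tlb": dump the valid guest page table
 * entries of the current CPU, one print_pte() line per mapping.
 */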
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;

    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
                           HWADDR_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

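/*
 * Each range printed by mem_print() has the form
 *     <start>-<end> <size> <protection>
 * where the protection string is 'u' or '-' (user), 'r' (readable) and
 * 'w' or '-' (writable), e.g. "ur-" for a read-only user mapping
 * (illustrative).
 */
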
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                              (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK |
                                                          PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}

static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

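/*
 * Handler for the HMP command "info mem": print the virtual address ranges
 * that are currently mapped, with their size and protection summary.
 */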
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

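/*
 * Handler for the HMP command "mce", which injects a machine check
 * exception on the given CPU.  Illustrative usage (arguments in the order
 * they are read from the qdict below):
 *     mce [-b] cpu bank status mcgstatus addr misc
 */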
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
                                  int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);
    return env->eip + env->segs[R_CS].base;
}

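/*
 * Register names exposed to HMP expression evaluation, so that commands
 * can refer to them as, e.g., "x/10i $eip" or "print $eax" (illustrative).
 */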
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}