2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
36 #include "qemu-common.h"
37 #include "cache-utils.h"
39 #if !defined(TARGET_IA64)
47 #if defined(CONFIG_USER_ONLY)
51 //#define DEBUG_TB_INVALIDATE
54 //#define DEBUG_UNASSIGNED
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation. */
68 /* Quick hack to enable KSM support */
69 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
71 #define SMC_BITMAP_USE_THRESHOLD 10
73 #if defined(TARGET_SPARC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 41
75 #elif defined(TARGET_SPARC)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77 #elif defined(TARGET_ALPHA)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #define TARGET_VIRT_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_PPC64)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
85 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 #elif defined(TARGET_IA64)
87 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
90 #define TARGET_PHYS_ADDR_SPACE_BITS 32
93 static TranslationBlock
*tbs
;
94 int code_gen_max_blocks
;
95 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
97 /* any access to the tbs or the page table must use this lock */
98 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
100 #if defined(__arm__) || defined(__sparc_v9__)
101 /* The prologue must be reachable with a direct jump. ARM and Sparc64
102 have limited branch ranges (possibly also PPC) so place it in a
103 section close to code segment. */
104 #define code_gen_section \
105 __attribute__((__section__(".gen_code"))) \
106 __attribute__((aligned (32)))
107 #elif defined(_WIN32)
108 /* Maximum alignment for Win32 is 16. */
109 #define code_gen_section \
110 __attribute__((aligned (16)))
112 #define code_gen_section \
113 __attribute__((aligned (32)))
116 uint8_t code_gen_prologue
[1024] code_gen_section
;
117 static uint8_t *code_gen_buffer
;
118 static unsigned long code_gen_buffer_size
;
119 /* threshold to flush the translated code buffer */
120 static unsigned long code_gen_buffer_max_size
;
121 uint8_t *code_gen_ptr
;
123 #if !defined(CONFIG_USER_ONLY)
125 uint8_t *phys_ram_dirty
;
127 static int in_migration
;
129 typedef struct RAMBlock
{
133 struct RAMBlock
*next
;
136 static RAMBlock
*ram_blocks
;
137 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
138 then we can no longer assume contiguous ram offsets, and external uses
139 of this variable will break. */
140 ram_addr_t last_ram_offset
;
144 /* current CPU in the current thread. It is only valid inside
146 CPUState
*cpu_single_env
;
147 /* 0 = Do not count executed instructions.
148 1 = Precise instruction counting.
149 2 = Adaptive rate instruction counting. */
151 /* Current instruction counter. While executing translated code this may
152 include some instructions that have not yet been executed. */
155 typedef struct PageDesc
{
156 /* list of TBs intersecting this ram page */
157 TranslationBlock
*first_tb
;
158 /* in order to optimize self modifying code, we count the number
159 of lookups we do to a given page to use a bitmap */
160 unsigned int code_write_count
;
161 uint8_t *code_bitmap
;
162 #if defined(CONFIG_USER_ONLY)
167 typedef struct PhysPageDesc
{
168 /* offset in host memory of the page + io_index in the low bits */
169 ram_addr_t phys_offset
;
170 ram_addr_t region_offset
;
174 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
175 /* XXX: this is a temporary hack for alpha target.
176 * In the future, this is to be replaced by a multi-level table
177 * to actually be able to handle the complete 64 bits address space.
179 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
181 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
184 #define L1_SIZE (1 << L1_BITS)
185 #define L2_SIZE (1 << L2_BITS)
187 unsigned long qemu_real_host_page_size
;
188 unsigned long qemu_host_page_bits
;
189 unsigned long qemu_host_page_size
;
190 unsigned long qemu_host_page_mask
;
192 /* XXX: for system emulation, it could just be an array */
193 static PageDesc
*l1_map
[L1_SIZE
];
194 static PhysPageDesc
**l1_phys_map
;
196 #if !defined(CONFIG_USER_ONLY)
197 static void io_mem_init(void);
199 /* io memory support */
200 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
201 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
202 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
203 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
204 static int io_mem_watch
;
208 static const char *logfilename
= "/tmp/qemu.log";
211 static int log_append
= 0;
214 static int tlb_flush_count
;
215 static int tb_flush_count
;
216 static int tb_phys_invalidate_count
;
218 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
219 typedef struct subpage_t
{
220 target_phys_addr_t base
;
221 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
222 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
223 void *opaque
[TARGET_PAGE_SIZE
][2][4];
224 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
228 static void map_exec(void *addr
, long size
)
231 VirtualProtect(addr
, size
,
232 PAGE_EXECUTE_READWRITE
, &old_protect
);
236 static void map_exec(void *addr
, long size
)
238 unsigned long start
, end
, page_size
;
240 page_size
= getpagesize();
241 start
= (unsigned long)addr
;
242 start
&= ~(page_size
- 1);
244 end
= (unsigned long)addr
+ size
;
245 end
+= page_size
- 1;
246 end
&= ~(page_size
- 1);
248 mprotect((void *)start
, end
- start
,
249 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
253 static void page_init(void)
255 /* NOTE: we can always suppose that qemu_host_page_size >=
259 SYSTEM_INFO system_info
;
261 GetSystemInfo(&system_info
);
262 qemu_real_host_page_size
= system_info
.dwPageSize
;
265 qemu_real_host_page_size
= getpagesize();
267 if (qemu_host_page_size
== 0)
268 qemu_host_page_size
= qemu_real_host_page_size
;
269 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
270 qemu_host_page_size
= TARGET_PAGE_SIZE
;
271 qemu_host_page_bits
= 0;
272 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
273 qemu_host_page_bits
++;
274 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
275 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
276 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
278 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
280 long long startaddr
, endaddr
;
285 last_brk
= (unsigned long)sbrk(0);
286 f
= fopen("/proc/self/maps", "r");
289 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
291 startaddr
= MIN(startaddr
,
292 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
293 endaddr
= MIN(endaddr
,
294 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
295 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
296 TARGET_PAGE_ALIGN(endaddr
),
307 static inline PageDesc
**page_l1_map(target_ulong index
)
309 #if TARGET_LONG_BITS > 32
310 /* Host memory outside guest VM. For 32-bit targets we have already
311 excluded high addresses. */
312 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
315 return &l1_map
[index
>> L2_BITS
];
318 static inline PageDesc
*page_find_alloc(target_ulong index
)
321 lp
= page_l1_map(index
);
327 /* allocate if not found */
328 #if defined(CONFIG_USER_ONLY)
329 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
330 /* Don't use qemu_malloc because it may recurse. */
331 p
= mmap(NULL
, len
, PROT_READ
| PROT_WRITE
,
332 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
335 unsigned long addr
= h2g(p
);
336 page_set_flags(addr
& TARGET_PAGE_MASK
,
337 TARGET_PAGE_ALIGN(addr
+ len
),
341 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
345 return p
+ (index
& (L2_SIZE
- 1));
348 static inline PageDesc
*page_find(target_ulong index
)
351 lp
= page_l1_map(index
);
359 return p
+ (index
& (L2_SIZE
- 1));
362 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
367 p
= (void **)l1_phys_map
;
368 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
370 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
371 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
373 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
376 /* allocate if not found */
379 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
380 memset(p
, 0, sizeof(void *) * L1_SIZE
);
384 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
388 /* allocate if not found */
391 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
393 for (i
= 0; i
< L2_SIZE
; i
++) {
394 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
395 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
398 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
401 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
403 return phys_page_find_alloc(index
, 0);
406 #if !defined(CONFIG_USER_ONLY)
407 static void tlb_protect_code(ram_addr_t ram_addr
);
408 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
410 #define mmap_lock() do { } while(0)
411 #define mmap_unlock() do { } while(0)
414 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
416 #if defined(CONFIG_USER_ONLY)
417 /* Currently it is not recommended to allocate big chunks of data in
418 user mode. It will change when a dedicated libc will be used */
419 #define USE_STATIC_CODE_GEN_BUFFER
422 #ifdef USE_STATIC_CODE_GEN_BUFFER
423 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
426 static void code_gen_alloc(unsigned long tb_size
)
431 #ifdef USE_STATIC_CODE_GEN_BUFFER
432 code_gen_buffer
= static_code_gen_buffer
;
433 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
434 map_exec(code_gen_buffer
, code_gen_buffer_size
);
436 code_gen_buffer_size
= tb_size
;
437 if (code_gen_buffer_size
== 0) {
438 #if defined(CONFIG_USER_ONLY)
439 /* in user mode, phys_ram_size is not meaningful */
440 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
442 /* XXX: needs adjustments */
443 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
446 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
447 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
448 /* The code gen buffer location may have constraints depending on
449 the host cpu and OS */
450 #if defined(__linux__)
455 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
456 #if defined(__x86_64__)
458 /* Cannot map more than that */
459 if (code_gen_buffer_size
> (800 * 1024 * 1024))
460 code_gen_buffer_size
= (800 * 1024 * 1024);
461 #elif defined(__sparc_v9__)
462 // Map the buffer below 2G, so we can use direct calls and branches
464 start
= (void *) 0x60000000UL
;
465 if (code_gen_buffer_size
> (512 * 1024 * 1024))
466 code_gen_buffer_size
= (512 * 1024 * 1024);
467 #elif defined(__arm__)
468 /* Map the buffer below 32M, so we can use direct calls and branches */
470 start
= (void *) 0x01000000UL
;
471 if (code_gen_buffer_size
> 16 * 1024 * 1024)
472 code_gen_buffer_size
= 16 * 1024 * 1024;
474 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
475 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
477 if (code_gen_buffer
== MAP_FAILED
) {
478 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
482 #elif defined(__FreeBSD__) || defined(__DragonFly__)
486 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
487 #if defined(__x86_64__)
488 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
489 * 0x40000000 is free */
491 addr
= (void *)0x40000000;
492 /* Cannot map more than that */
493 if (code_gen_buffer_size
> (800 * 1024 * 1024))
494 code_gen_buffer_size
= (800 * 1024 * 1024);
496 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
497 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
499 if (code_gen_buffer
== MAP_FAILED
) {
500 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
505 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
506 map_exec(code_gen_buffer
, code_gen_buffer_size
);
508 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
509 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
510 code_gen_buffer_max_size
= code_gen_buffer_size
-
511 code_gen_max_block_size();
512 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
513 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
516 /* Must be called before using the QEMU cpus. 'tb_size' is the size
517 (in bytes) allocated to the translation buffer. Zero means default
519 void cpu_exec_init_all(unsigned long tb_size
)
522 code_gen_alloc(tb_size
);
523 code_gen_ptr
= code_gen_buffer
;
525 #if !defined(CONFIG_USER_ONLY)
530 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
532 #define CPU_COMMON_SAVE_VERSION 1
534 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
536 CPUState
*env
= opaque
;
538 cpu_synchronize_state(env
, 0);
540 qemu_put_be32s(f
, &env
->halted
);
541 qemu_put_be32s(f
, &env
->interrupt_request
);
544 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
546 CPUState
*env
= opaque
;
548 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
551 qemu_get_be32s(f
, &env
->halted
);
552 qemu_get_be32s(f
, &env
->interrupt_request
);
553 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
554 version_id is increased. */
555 env
->interrupt_request
&= ~0x01;
557 cpu_synchronize_state(env
, 1);
563 CPUState
*qemu_get_cpu(int cpu
)
565 CPUState
*env
= first_cpu
;
568 if (env
->cpu_index
== cpu
)
576 void cpu_exec_init(CPUState
*env
)
581 #if defined(CONFIG_USER_ONLY)
584 env
->next_cpu
= NULL
;
587 while (*penv
!= NULL
) {
588 penv
= &(*penv
)->next_cpu
;
591 env
->cpu_index
= cpu_index
;
593 TAILQ_INIT(&env
->breakpoints
);
594 TAILQ_INIT(&env
->watchpoints
);
596 env
->thread_id
= GetCurrentProcessId();
598 env
->thread_id
= getpid();
601 #if defined(CONFIG_USER_ONLY)
604 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
606 cpu_common_save
, cpu_common_load
, env
);
607 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
608 cpu_save
, cpu_load
, env
);
612 static inline void invalidate_page_bitmap(PageDesc
*p
)
614 if (p
->code_bitmap
) {
615 qemu_free(p
->code_bitmap
);
616 p
->code_bitmap
= NULL
;
618 p
->code_write_count
= 0;
621 /* set to NULL all the 'first_tb' fields in all PageDescs */
622 static void page_flush_tb(void)
627 for(i
= 0; i
< L1_SIZE
; i
++) {
630 for(j
= 0; j
< L2_SIZE
; j
++) {
632 invalidate_page_bitmap(p
);
639 /* flush all the translation blocks */
640 /* XXX: tb_flush is currently not thread safe */
641 void tb_flush(CPUState
*env1
)
644 #if defined(DEBUG_FLUSH)
645 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
646 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
648 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
650 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
651 cpu_abort(env1
, "Internal error: code buffer overflow\n");
655 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
656 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
659 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
662 code_gen_ptr
= code_gen_buffer
;
663 /* XXX: flush processor icache at this point if cache flush is
668 #ifdef DEBUG_TB_CHECK
670 static void tb_invalidate_check(target_ulong address
)
672 TranslationBlock
*tb
;
674 address
&= TARGET_PAGE_MASK
;
675 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
676 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
677 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
678 address
>= tb
->pc
+ tb
->size
)) {
679 printf("ERROR invalidate: address=" TARGET_FMT_lx
680 " PC=%08lx size=%04x\n",
681 address
, (long)tb
->pc
, tb
->size
);
687 /* verify that all the pages have correct rights for code */
688 static void tb_page_check(void)
690 TranslationBlock
*tb
;
691 int i
, flags1
, flags2
;
693 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
694 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
695 flags1
= page_get_flags(tb
->pc
);
696 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
697 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
698 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
699 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
707 /* invalidate one TB */
708 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
711 TranslationBlock
*tb1
;
715 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
718 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
722 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
724 TranslationBlock
*tb1
;
730 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
732 *ptb
= tb1
->page_next
[n1
];
735 ptb
= &tb1
->page_next
[n1
];
739 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
741 TranslationBlock
*tb1
, **ptb
;
744 ptb
= &tb
->jmp_next
[n
];
747 /* find tb(n) in circular list */
751 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
752 if (n1
== n
&& tb1
== tb
)
755 ptb
= &tb1
->jmp_first
;
757 ptb
= &tb1
->jmp_next
[n1
];
760 /* now we can suppress tb(n) from the list */
761 *ptb
= tb
->jmp_next
[n
];
763 tb
->jmp_next
[n
] = NULL
;
767 /* reset the jump entry 'n' of a TB so that it is not chained to
769 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
771 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
774 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
779 target_phys_addr_t phys_pc
;
780 TranslationBlock
*tb1
, *tb2
;
782 /* remove the TB from the hash list */
783 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
784 h
= tb_phys_hash_func(phys_pc
);
785 tb_remove(&tb_phys_hash
[h
], tb
,
786 offsetof(TranslationBlock
, phys_hash_next
));
788 /* remove the TB from the page list */
789 if (tb
->page_addr
[0] != page_addr
) {
790 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
791 tb_page_remove(&p
->first_tb
, tb
);
792 invalidate_page_bitmap(p
);
794 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
795 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
796 tb_page_remove(&p
->first_tb
, tb
);
797 invalidate_page_bitmap(p
);
800 tb_invalidated_flag
= 1;
802 /* remove the TB from the hash list */
803 h
= tb_jmp_cache_hash_func(tb
->pc
);
804 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
805 if (env
->tb_jmp_cache
[h
] == tb
)
806 env
->tb_jmp_cache
[h
] = NULL
;
809 /* suppress this TB from the two jump lists */
810 tb_jmp_remove(tb
, 0);
811 tb_jmp_remove(tb
, 1);
813 /* suppress any remaining jumps to this TB */
819 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
820 tb2
= tb1
->jmp_next
[n1
];
821 tb_reset_jump(tb1
, n1
);
822 tb1
->jmp_next
[n1
] = NULL
;
825 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
827 tb_phys_invalidate_count
++;
830 static inline void set_bits(uint8_t *tab
, int start
, int len
)
836 mask
= 0xff << (start
& 7);
837 if ((start
& ~7) == (end
& ~7)) {
839 mask
&= ~(0xff << (end
& 7));
844 start
= (start
+ 8) & ~7;
846 while (start
< end1
) {
851 mask
= ~(0xff << (end
& 7));
857 static void build_page_bitmap(PageDesc
*p
)
859 int n
, tb_start
, tb_end
;
860 TranslationBlock
*tb
;
862 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
867 tb
= (TranslationBlock
*)((long)tb
& ~3);
868 /* NOTE: this is subtle as a TB may span two physical pages */
870 /* NOTE: tb_end may be after the end of the page, but
871 it is not a problem */
872 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
873 tb_end
= tb_start
+ tb
->size
;
874 if (tb_end
> TARGET_PAGE_SIZE
)
875 tb_end
= TARGET_PAGE_SIZE
;
878 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
880 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
881 tb
= tb
->page_next
[n
];
885 TranslationBlock
*tb_gen_code(CPUState
*env
,
886 target_ulong pc
, target_ulong cs_base
,
887 int flags
, int cflags
)
889 TranslationBlock
*tb
;
891 target_ulong phys_pc
, phys_page2
, virt_page2
;
894 phys_pc
= get_phys_addr_code(env
, pc
);
897 /* flush must be done */
899 /* cannot fail at this point */
901 /* Don't forget to invalidate previous TB info. */
902 tb_invalidated_flag
= 1;
904 tc_ptr
= code_gen_ptr
;
906 tb
->cs_base
= cs_base
;
909 cpu_gen_code(env
, tb
, &code_gen_size
);
910 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
912 /* check next page if needed */
913 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
915 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
916 phys_page2
= get_phys_addr_code(env
, virt_page2
);
918 tb_link_phys(tb
, phys_pc
, phys_page2
);
922 /* invalidate all TBs which intersect with the target physical page
923 starting in range [start;end[. NOTE: start and end must refer to
924 the same physical page. 'is_cpu_write_access' should be true if called
925 from a real cpu write access: the virtual CPU will exit the current
926 TB if code is modified inside this TB. */
927 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
928 int is_cpu_write_access
)
930 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
931 CPUState
*env
= cpu_single_env
;
932 target_ulong tb_start
, tb_end
;
935 #ifdef TARGET_HAS_PRECISE_SMC
936 int current_tb_not_found
= is_cpu_write_access
;
937 TranslationBlock
*current_tb
= NULL
;
938 int current_tb_modified
= 0;
939 target_ulong current_pc
= 0;
940 target_ulong current_cs_base
= 0;
941 int current_flags
= 0;
942 #endif /* TARGET_HAS_PRECISE_SMC */
944 p
= page_find(start
>> TARGET_PAGE_BITS
);
947 if (!p
->code_bitmap
&&
948 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
949 is_cpu_write_access
) {
950 /* build code bitmap */
951 build_page_bitmap(p
);
954 /* we remove all the TBs in the range [start, end[ */
955 /* XXX: see if in some cases it could be faster to invalidate all the code */
959 tb
= (TranslationBlock
*)((long)tb
& ~3);
960 tb_next
= tb
->page_next
[n
];
961 /* NOTE: this is subtle as a TB may span two physical pages */
963 /* NOTE: tb_end may be after the end of the page, but
964 it is not a problem */
965 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
966 tb_end
= tb_start
+ tb
->size
;
968 tb_start
= tb
->page_addr
[1];
969 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
971 if (!(tb_end
<= start
|| tb_start
>= end
)) {
972 #ifdef TARGET_HAS_PRECISE_SMC
973 if (current_tb_not_found
) {
974 current_tb_not_found
= 0;
976 if (env
->mem_io_pc
) {
977 /* now we have a real cpu fault */
978 current_tb
= tb_find_pc(env
->mem_io_pc
);
981 if (current_tb
== tb
&&
982 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
983 /* If we are modifying the current TB, we must stop
984 its execution. We could be more precise by checking
985 that the modification is after the current PC, but it
986 would require a specialized function to partially
987 restore the CPU state */
989 current_tb_modified
= 1;
990 cpu_restore_state(current_tb
, env
,
991 env
->mem_io_pc
, NULL
);
992 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
995 #endif /* TARGET_HAS_PRECISE_SMC */
996 /* we need to do that to handle the case where a signal
997 occurs while doing tb_phys_invalidate() */
1000 saved_tb
= env
->current_tb
;
1001 env
->current_tb
= NULL
;
1003 tb_phys_invalidate(tb
, -1);
1005 env
->current_tb
= saved_tb
;
1006 if (env
->interrupt_request
&& env
->current_tb
)
1007 cpu_interrupt(env
, env
->interrupt_request
);
1012 #if !defined(CONFIG_USER_ONLY)
1013 /* if no code remaining, no need to continue to use slow writes */
1015 invalidate_page_bitmap(p
);
1016 if (is_cpu_write_access
) {
1017 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1021 #ifdef TARGET_HAS_PRECISE_SMC
1022 if (current_tb_modified
) {
1023 /* we generate a block containing just the instruction
1024 modifying the memory. It will ensure that it cannot modify
1026 env
->current_tb
= NULL
;
1027 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1028 cpu_resume_from_signal(env
, NULL
);
1033 /* len must be <= 8 and start must be a multiple of len */
1034 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1040 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1041 cpu_single_env
->mem_io_vaddr
, len
,
1042 cpu_single_env
->eip
,
1043 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1046 p
= page_find(start
>> TARGET_PAGE_BITS
);
1049 if (p
->code_bitmap
) {
1050 offset
= start
& ~TARGET_PAGE_MASK
;
1051 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1052 if (b
& ((1 << len
) - 1))
1056 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1060 #if !defined(CONFIG_SOFTMMU)
1061 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1062 unsigned long pc
, void *puc
)
1064 TranslationBlock
*tb
;
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 TranslationBlock
*current_tb
= NULL
;
1069 CPUState
*env
= cpu_single_env
;
1070 int current_tb_modified
= 0;
1071 target_ulong current_pc
= 0;
1072 target_ulong current_cs_base
= 0;
1073 int current_flags
= 0;
1076 addr
&= TARGET_PAGE_MASK
;
1077 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1081 #ifdef TARGET_HAS_PRECISE_SMC
1082 if (tb
&& pc
!= 0) {
1083 current_tb
= tb_find_pc(pc
);
1086 while (tb
!= NULL
) {
1088 tb
= (TranslationBlock
*)((long)tb
& ~3);
1089 #ifdef TARGET_HAS_PRECISE_SMC
1090 if (current_tb
== tb
&&
1091 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1092 /* If we are modifying the current TB, we must stop
1093 its execution. We could be more precise by checking
1094 that the modification is after the current PC, but it
1095 would require a specialized function to partially
1096 restore the CPU state */
1098 current_tb_modified
= 1;
1099 cpu_restore_state(current_tb
, env
, pc
, puc
);
1100 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1103 #endif /* TARGET_HAS_PRECISE_SMC */
1104 tb_phys_invalidate(tb
, addr
);
1105 tb
= tb
->page_next
[n
];
1108 #ifdef TARGET_HAS_PRECISE_SMC
1109 if (current_tb_modified
) {
1110 /* we generate a block containing just the instruction
1111 modifying the memory. It will ensure that it cannot modify
1113 env
->current_tb
= NULL
;
1114 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1115 cpu_resume_from_signal(env
, puc
);
1121 /* add the tb in the target page and protect it if necessary */
1122 static inline void tb_alloc_page(TranslationBlock
*tb
,
1123 unsigned int n
, target_ulong page_addr
)
1126 TranslationBlock
*last_first_tb
;
1128 tb
->page_addr
[n
] = page_addr
;
1129 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1130 tb
->page_next
[n
] = p
->first_tb
;
1131 last_first_tb
= p
->first_tb
;
1132 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1133 invalidate_page_bitmap(p
);
1135 #if defined(TARGET_HAS_SMC) || 1
1137 #if defined(CONFIG_USER_ONLY)
1138 if (p
->flags
& PAGE_WRITE
) {
1143 /* force the host page as non writable (writes will have a
1144 page fault + mprotect overhead) */
1145 page_addr
&= qemu_host_page_mask
;
1147 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1148 addr
+= TARGET_PAGE_SIZE
) {
1150 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1154 p2
->flags
&= ~PAGE_WRITE
;
1155 page_get_flags(addr
);
1157 mprotect(g2h(page_addr
), qemu_host_page_size
,
1158 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1159 #ifdef DEBUG_TB_INVALIDATE
1160 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1165 /* if some code is already present, then the pages are already
1166 protected. So we handle the case where only the first TB is
1167 allocated in a physical page */
1168 if (!last_first_tb
) {
1169 tlb_protect_code(page_addr
);
1173 #endif /* TARGET_HAS_SMC */
1176 /* Allocate a new translation block. Flush the translation buffer if
1177 too many translation blocks or too much generated code. */
1178 TranslationBlock
*tb_alloc(target_ulong pc
)
1180 TranslationBlock
*tb
;
1182 if (nb_tbs
>= code_gen_max_blocks
||
1183 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1185 tb
= &tbs
[nb_tbs
++];
1191 void tb_free(TranslationBlock
*tb
)
1193 /* In practice this is mostly used for single use temporary TB
1194 Ignore the hard cases and just back up if this TB happens to
1195 be the last one generated. */
1196 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1197 code_gen_ptr
= tb
->tc_ptr
;
1202 /* add a new TB and link it to the physical page tables. phys_page2 is
1203 (-1) to indicate that only one page contains the TB. */
1204 void tb_link_phys(TranslationBlock
*tb
,
1205 target_ulong phys_pc
, target_ulong phys_page2
)
1208 TranslationBlock
**ptb
;
1210 /* Grab the mmap lock to stop another thread invalidating this TB
1211 before we are done. */
1213 /* add in the physical hash table */
1214 h
= tb_phys_hash_func(phys_pc
);
1215 ptb
= &tb_phys_hash
[h
];
1216 tb
->phys_hash_next
= *ptb
;
1219 /* add in the page list */
1220 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1221 if (phys_page2
!= -1)
1222 tb_alloc_page(tb
, 1, phys_page2
);
1224 tb
->page_addr
[1] = -1;
1226 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1227 tb
->jmp_next
[0] = NULL
;
1228 tb
->jmp_next
[1] = NULL
;
1230 /* init original jump addresses */
1231 if (tb
->tb_next_offset
[0] != 0xffff)
1232 tb_reset_jump(tb
, 0);
1233 if (tb
->tb_next_offset
[1] != 0xffff)
1234 tb_reset_jump(tb
, 1);
1236 #ifdef DEBUG_TB_CHECK
1242 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1243 tb[1].tc_ptr. Return NULL if not found */
1244 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1246 int m_min
, m_max
, m
;
1248 TranslationBlock
*tb
;
1252 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1253 tc_ptr
>= (unsigned long)code_gen_ptr
)
1255 /* binary search (cf Knuth) */
1258 while (m_min
<= m_max
) {
1259 m
= (m_min
+ m_max
) >> 1;
1261 v
= (unsigned long)tb
->tc_ptr
;
1264 else if (tc_ptr
< v
) {
1273 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1275 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1277 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1280 tb1
= tb
->jmp_next
[n
];
1282 /* find head of list */
1285 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1288 tb1
= tb1
->jmp_next
[n1
];
1290 /* we are now sure now that tb jumps to tb1 */
1293 /* remove tb from the jmp_first list */
1294 ptb
= &tb_next
->jmp_first
;
1298 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1299 if (n1
== n
&& tb1
== tb
)
1301 ptb
= &tb1
->jmp_next
[n1
];
1303 *ptb
= tb
->jmp_next
[n
];
1304 tb
->jmp_next
[n
] = NULL
;
1306 /* suppress the jump to next tb in generated code */
1307 tb_reset_jump(tb
, n
);
1309 /* suppress jumps in the tb on which we could have jumped */
1310 tb_reset_jump_recursive(tb_next
);
1314 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1316 tb_reset_jump_recursive2(tb
, 0);
1317 tb_reset_jump_recursive2(tb
, 1);
1320 #if defined(TARGET_HAS_ICE)
1321 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1323 target_phys_addr_t addr
;
1325 ram_addr_t ram_addr
;
1328 addr
= cpu_get_phys_page_debug(env
, pc
);
1329 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1331 pd
= IO_MEM_UNASSIGNED
;
1333 pd
= p
->phys_offset
;
1335 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1336 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1340 /* Add a watchpoint. */
1341 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1342 int flags
, CPUWatchpoint
**watchpoint
)
1344 target_ulong len_mask
= ~(len
- 1);
1347 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1348 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1349 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1350 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1353 wp
= qemu_malloc(sizeof(*wp
));
1356 wp
->len_mask
= len_mask
;
1359 /* keep all GDB-injected watchpoints in front */
1361 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1363 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1365 tlb_flush_page(env
, addr
);
1372 /* Remove a specific watchpoint. */
1373 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1376 target_ulong len_mask
= ~(len
- 1);
1379 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1380 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1381 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1382 cpu_watchpoint_remove_by_ref(env
, wp
);
1389 /* Remove a specific watchpoint by reference. */
1390 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1392 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1394 tlb_flush_page(env
, watchpoint
->vaddr
);
1396 qemu_free(watchpoint
);
1399 /* Remove all matching watchpoints. */
1400 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1402 CPUWatchpoint
*wp
, *next
;
1404 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1405 if (wp
->flags
& mask
)
1406 cpu_watchpoint_remove_by_ref(env
, wp
);
1410 /* Add a breakpoint. */
1411 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1412 CPUBreakpoint
**breakpoint
)
1414 #if defined(TARGET_HAS_ICE)
1417 bp
= qemu_malloc(sizeof(*bp
));
1422 /* keep all GDB-injected breakpoints in front */
1424 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1426 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1428 breakpoint_invalidate(env
, pc
);
1438 /* Remove a specific breakpoint. */
1439 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1441 #if defined(TARGET_HAS_ICE)
1444 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1445 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1446 cpu_breakpoint_remove_by_ref(env
, bp
);
1456 /* Remove a specific breakpoint by reference. */
1457 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1459 #if defined(TARGET_HAS_ICE)
1460 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1462 breakpoint_invalidate(env
, breakpoint
->pc
);
1464 qemu_free(breakpoint
);
1468 /* Remove all matching breakpoints. */
1469 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1471 #if defined(TARGET_HAS_ICE)
1472 CPUBreakpoint
*bp
, *next
;
1474 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1475 if (bp
->flags
& mask
)
1476 cpu_breakpoint_remove_by_ref(env
, bp
);
1481 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1482 CPU loop after each instruction */
1483 void cpu_single_step(CPUState
*env
, int enabled
)
1485 #if defined(TARGET_HAS_ICE)
1486 if (env
->singlestep_enabled
!= enabled
) {
1487 env
->singlestep_enabled
= enabled
;
1489 kvm_update_guest_debug(env
, 0);
1491 /* must flush all the translated code to avoid inconsistencies */
1492 /* XXX: only flush what is necessary */
1499 /* enable or disable low levels log */
1500 void cpu_set_log(int log_flags
)
1502 loglevel
= log_flags
;
1503 if (loglevel
&& !logfile
) {
1504 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1506 perror(logfilename
);
1509 #if !defined(CONFIG_SOFTMMU)
1510 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1512 static char logfile_buf
[4096];
1513 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1516 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1520 if (!loglevel
&& logfile
) {
1526 void cpu_set_log_filename(const char *filename
)
1528 logfilename
= strdup(filename
);
1533 cpu_set_log(loglevel
);
1536 static void cpu_unlink_tb(CPUState
*env
)
1538 #if defined(USE_NPTL)
1539 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1540 problem and hope the cpu will stop of its own accord. For userspace
1541 emulation this often isn't actually as bad as it sounds. Often
1542 signals are used primarily to interrupt blocking syscalls. */
1544 TranslationBlock
*tb
;
1545 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1547 tb
= env
->current_tb
;
1548 /* if the cpu is currently executing code, we must unlink it and
1549 all the potentially executing TB */
1550 if (tb
&& !testandset(&interrupt_lock
)) {
1551 env
->current_tb
= NULL
;
1552 tb_reset_jump_recursive(tb
);
1553 resetlock(&interrupt_lock
);
1558 /* mask must never be zero, except for A20 change call */
1559 void cpu_interrupt(CPUState
*env
, int mask
)
1563 old_mask
= env
->interrupt_request
;
1564 env
->interrupt_request
|= mask
;
1565 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1566 kvm_update_interrupt_request(env
);
1568 #ifndef CONFIG_USER_ONLY
1570 * If called from iothread context, wake the target cpu in
1573 if (!qemu_cpu_self(env
)) {
1580 env
->icount_decr
.u16
.high
= 0xffff;
1581 #ifndef CONFIG_USER_ONLY
1583 && (mask
& ~old_mask
) != 0) {
1584 cpu_abort(env
, "Raised interrupt while not in I/O function");
1592 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1594 env
->interrupt_request
&= ~mask
;
1597 void cpu_exit(CPUState
*env
)
1599 env
->exit_request
= 1;
1603 const CPULogItem cpu_log_items
[] = {
1604 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1605 "show generated host assembly code for each compiled TB" },
1606 { CPU_LOG_TB_IN_ASM
, "in_asm",
1607 "show target assembly code for each compiled TB" },
1608 { CPU_LOG_TB_OP
, "op",
1609 "show micro ops for each compiled TB" },
1610 { CPU_LOG_TB_OP_OPT
, "op_opt",
1613 "before eflags optimization and "
1615 "after liveness analysis" },
1616 { CPU_LOG_INT
, "int",
1617 "show interrupts/exceptions in short format" },
1618 { CPU_LOG_EXEC
, "exec",
1619 "show trace before each executed TB (lots of logs)" },
1620 { CPU_LOG_TB_CPU
, "cpu",
1621 "show CPU state before block translation" },
1623 { CPU_LOG_PCALL
, "pcall",
1624 "show protected mode far calls/returns/exceptions" },
1625 { CPU_LOG_RESET
, "cpu_reset",
1626 "show CPU state before CPU resets" },
1629 { CPU_LOG_IOPORT
, "ioport",
1630 "show all i/o ports accesses" },
1635 static int cmp1(const char *s1
, int n
, const char *s2
)
1637 if (strlen(s2
) != n
)
1639 return memcmp(s1
, s2
, n
) == 0;
1642 /* takes a comma separated list of log masks. Return 0 if error. */
1643 int cpu_str_to_log_mask(const char *str
)
1645 const CPULogItem
*item
;
1652 p1
= strchr(p
, ',');
1655 if(cmp1(p
,p1
-p
,"all")) {
1656 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1660 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1661 if (cmp1(p
, p1
- p
, item
->name
))
1675 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1682 fprintf(stderr
, "qemu: fatal: ");
1683 vfprintf(stderr
, fmt
, ap
);
1684 fprintf(stderr
, "\n");
1686 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1688 cpu_dump_state(env
, stderr
, fprintf
, 0);
1690 if (qemu_log_enabled()) {
1691 qemu_log("qemu: fatal: ");
1692 qemu_log_vprintf(fmt
, ap2
);
1695 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1697 log_cpu_state(env
, 0);
1707 CPUState
*cpu_copy(CPUState
*env
)
1709 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1710 CPUState
*next_cpu
= new_env
->next_cpu
;
1711 int cpu_index
= new_env
->cpu_index
;
1712 #if defined(TARGET_HAS_ICE)
1717 memcpy(new_env
, env
, sizeof(CPUState
));
1719 /* Preserve chaining and index. */
1720 new_env
->next_cpu
= next_cpu
;
1721 new_env
->cpu_index
= cpu_index
;
1723 /* Clone all break/watchpoints.
1724 Note: Once we support ptrace with hw-debug register access, make sure
1725 BP_CPU break/watchpoints are handled correctly on clone. */
1726 TAILQ_INIT(&env
->breakpoints
);
1727 TAILQ_INIT(&env
->watchpoints
);
1728 #if defined(TARGET_HAS_ICE)
1729 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1730 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1732 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1733 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1741 #if !defined(CONFIG_USER_ONLY)
1743 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1747 /* Discard jump cache entries for any tb which might potentially
1748 overlap the flushed page. */
1749 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1750 memset (&env
->tb_jmp_cache
[i
], 0,
1751 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1753 i
= tb_jmp_cache_hash_page(addr
);
1754 memset (&env
->tb_jmp_cache
[i
], 0,
1755 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1758 static CPUTLBEntry s_cputlb_empty_entry
= {
1765 /* NOTE: if flush_global is true, also flush global entries (not
1767 void tlb_flush(CPUState
*env
, int flush_global
)
1771 #if defined(DEBUG_TLB)
1772 printf("tlb_flush:\n");
1774 /* must reset current TB so that interrupts cannot modify the
1775 links while we are modifying them */
1776 env
->current_tb
= NULL
;
1778 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1780 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1781 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1785 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1788 if (env
->kqemu_enabled
) {
1789 kqemu_flush(env
, flush_global
);
1795 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1797 if (addr
== (tlb_entry
->addr_read
&
1798 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1799 addr
== (tlb_entry
->addr_write
&
1800 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1801 addr
== (tlb_entry
->addr_code
&
1802 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1803 *tlb_entry
= s_cputlb_empty_entry
;
1807 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1812 #if defined(DEBUG_TLB)
1813 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1815 /* must reset current TB so that interrupts cannot modify the
1816 links while we are modifying them */
1817 env
->current_tb
= NULL
;
1819 addr
&= TARGET_PAGE_MASK
;
1820 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1821 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1822 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1824 tlb_flush_jmp_cache(env
, addr
);
1827 if (env
->kqemu_enabled
) {
1828 kqemu_flush_page(env
, addr
);
1833 /* update the TLBs so that writes to code in the virtual page 'addr'
1835 static void tlb_protect_code(ram_addr_t ram_addr
)
1837 cpu_physical_memory_reset_dirty(ram_addr
,
1838 ram_addr
+ TARGET_PAGE_SIZE
,
1842 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1843 tested for self modifying code */
1844 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1847 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1850 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1851 unsigned long start
, unsigned long length
)
1854 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1855 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1856 if ((addr
- start
) < length
) {
1857 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1862 /* Note: start and end must be within the same ram block. */
1863 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1867 unsigned long length
, start1
;
1871 start
&= TARGET_PAGE_MASK
;
1872 end
= TARGET_PAGE_ALIGN(end
);
1874 length
= end
- start
;
1877 len
= length
>> TARGET_PAGE_BITS
;
1879 /* XXX: should not depend on cpu context */
1881 if (env
->kqemu_enabled
) {
1884 for(i
= 0; i
< len
; i
++) {
1885 kqemu_set_notdirty(env
, addr
);
1886 addr
+= TARGET_PAGE_SIZE
;
1890 mask
= ~dirty_flags
;
1891 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1892 for(i
= 0; i
< len
; i
++)
1895 /* we modify the TLB cache so that the dirty bit will be set again
1896 when accessing the range */
1897 start1
= (unsigned long)qemu_get_ram_ptr(start
);
1898 /* Chek that we don't span multiple blocks - this breaks the
1899 address comparisons below. */
1900 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
1901 != (end
- 1) - start
) {
1905 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1907 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1908 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1909 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
1915 int cpu_physical_memory_set_dirty_tracking(int enable
)
1917 if (kvm_enabled()) {
1918 return kvm_set_migration_log(enable
);
1923 int cpu_physical_memory_get_dirty_tracking(void)
1925 return in_migration
;
1928 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
1929 target_phys_addr_t end_addr
)
1934 ret
= kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1938 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1940 ram_addr_t ram_addr
;
1943 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1944 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
1945 + tlb_entry
->addend
);
1946 ram_addr
= qemu_ram_addr_from_host(p
);
1947 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1948 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1953 /* update the TLB according to the current state of the dirty bits */
1954 void cpu_tlb_update_dirty(CPUState
*env
)
1958 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1959 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1960 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
1964 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1966 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1967 tlb_entry
->addr_write
= vaddr
;
1970 /* update the TLB corresponding to virtual page vaddr
1971 so that it is no longer dirty */
1972 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1977 vaddr
&= TARGET_PAGE_MASK
;
1978 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1979 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1980 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
1983 /* add a new TLB entry. At most one entry for a given virtual address
1984 is permitted. Return 0 if OK or 2 if the page could not be mapped
1985 (can only happen in non SOFTMMU mode for I/O pages or pages
1986 conflicting with the host address space). */
1987 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1988 target_phys_addr_t paddr
, int prot
,
1989 int mmu_idx
, int is_softmmu
)
1994 target_ulong address
;
1995 target_ulong code_address
;
1996 target_phys_addr_t addend
;
2000 target_phys_addr_t iotlb
;
2002 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2004 pd
= IO_MEM_UNASSIGNED
;
2006 pd
= p
->phys_offset
;
2008 #if defined(DEBUG_TLB)
2009 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2010 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2015 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2016 /* IO memory case (romd handled later) */
2017 address
|= TLB_MMIO
;
2019 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2020 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2022 iotlb
= pd
& TARGET_PAGE_MASK
;
2023 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2024 iotlb
|= IO_MEM_NOTDIRTY
;
2026 iotlb
|= IO_MEM_ROM
;
2028 /* IO handlers are currently passed a physical address.
2029 It would be nice to pass an offset from the base address
2030 of that region. This would avoid having to special case RAM,
2031 and avoid full address decoding in every device.
2032 We can't use the high bits of pd for this because
2033 IO_MEM_ROMD uses these as a ram address. */
2034 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2036 iotlb
+= p
->region_offset
;
2042 code_address
= address
;
2043 /* Make accesses to pages with watchpoints go via the
2044 watchpoint trap routines. */
2045 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2046 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2047 iotlb
= io_mem_watch
+ paddr
;
2048 /* TODO: The memory case can be optimized by not trapping
2049 reads of pages with a write breakpoint. */
2050 address
|= TLB_MMIO
;
2054 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2055 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2056 te
= &env
->tlb_table
[mmu_idx
][index
];
2057 te
->addend
= addend
- vaddr
;
2058 if (prot
& PAGE_READ
) {
2059 te
->addr_read
= address
;
2064 if (prot
& PAGE_EXEC
) {
2065 te
->addr_code
= code_address
;
2069 if (prot
& PAGE_WRITE
) {
2070 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2071 (pd
& IO_MEM_ROMD
)) {
2072 /* Write access calls the I/O callback. */
2073 te
->addr_write
= address
| TLB_MMIO
;
2074 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2075 !cpu_physical_memory_is_dirty(pd
)) {
2076 te
->addr_write
= address
| TLB_NOTDIRTY
;
2078 te
->addr_write
= address
;
2081 te
->addr_write
= -1;
2088 void tlb_flush(CPUState
*env
, int flush_global
)
2092 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2096 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2097 target_phys_addr_t paddr
, int prot
,
2098 int mmu_idx
, int is_softmmu
)
2104 * Walks guest process memory "regions" one by one
2105 * and calls callback function 'fn' for each region.
2107 int walk_memory_regions(void *priv
,
2108 int (*fn
)(void *, unsigned long, unsigned long, unsigned long))
2110 unsigned long start
, end
;
2112 int i
, j
, prot
, prot1
;
2118 for (i
= 0; i
<= L1_SIZE
; i
++) {
2119 p
= (i
< L1_SIZE
) ? l1_map
[i
] : NULL
;
2120 for (j
= 0; j
< L2_SIZE
; j
++) {
2121 prot1
= (p
== NULL
) ? 0 : p
[j
].flags
;
2123 * "region" is one continuous chunk of memory
2124 * that has same protection flags set.
2126 if (prot1
!= prot
) {
2127 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2129 rc
= (*fn
)(priv
, start
, end
, prot
);
2130 /* callback can stop iteration by returning != 0 */
2147 static int dump_region(void *priv
, unsigned long start
,
2148 unsigned long end
, unsigned long prot
)
2150 FILE *f
= (FILE *)priv
;
2152 (void) fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2153 start
, end
, end
- start
,
2154 ((prot
& PAGE_READ
) ? 'r' : '-'),
2155 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2156 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2161 /* dump memory mappings */
2162 void page_dump(FILE *f
)
2164 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2165 "start", "end", "size", "prot");
2166 walk_memory_regions(f
, dump_region
);
2169 int page_get_flags(target_ulong address
)
2173 p
= page_find(address
>> TARGET_PAGE_BITS
);
2179 /* modify the flags of a page and invalidate the code if
2180 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2181 depending on PAGE_WRITE */
2182 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2187 /* mmap_lock should already be held. */
2188 start
= start
& TARGET_PAGE_MASK
;
2189 end
= TARGET_PAGE_ALIGN(end
);
2190 if (flags
& PAGE_WRITE
)
2191 flags
|= PAGE_WRITE_ORG
;
2192 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2193 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2194 /* We may be called for host regions that are outside guest
2198 /* if the write protection is set, then we invalidate the code
2200 if (!(p
->flags
& PAGE_WRITE
) &&
2201 (flags
& PAGE_WRITE
) &&
2203 tb_invalidate_phys_page(addr
, 0, NULL
);
2209 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2215 if (start
+ len
< start
)
2216 /* we've wrapped around */
2219 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2220 start
= start
& TARGET_PAGE_MASK
;
2222 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2223 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2226 if( !(p
->flags
& PAGE_VALID
) )
2229 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2231 if (flags
& PAGE_WRITE
) {
2232 if (!(p
->flags
& PAGE_WRITE_ORG
))
2234 /* unprotect the page if it was put read-only because it
2235 contains translated code */
2236 if (!(p
->flags
& PAGE_WRITE
)) {
2237 if (!page_unprotect(addr
, 0, NULL
))
2246 /* called from signal handler: invalidate the code and unprotect the
2247 page. Return TRUE if the fault was successfully handled. */
2248 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2250 unsigned int page_index
, prot
, pindex
;
2252 target_ulong host_start
, host_end
, addr
;
2254 /* Technically this isn't safe inside a signal handler. However we
2255 know this only ever happens in a synchronous SEGV handler, so in
2256 practice it seems to be ok. */
2259 host_start
= address
& qemu_host_page_mask
;
2260 page_index
= host_start
>> TARGET_PAGE_BITS
;
2261 p1
= page_find(page_index
);
2266 host_end
= host_start
+ qemu_host_page_size
;
2269 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2273 /* if the page was really writable, then we change its
2274 protection back to writable */
2275 if (prot
& PAGE_WRITE_ORG
) {
2276 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2277 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2278 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2279 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2280 p1
[pindex
].flags
|= PAGE_WRITE
;
2281 /* and since the content will be modified, we must invalidate
2282 the corresponding translated code. */
2283 tb_invalidate_phys_page(address
, pc
, puc
);
2284 #ifdef DEBUG_TB_CHECK
2285 tb_invalidate_check(address
);
2295 static inline void tlb_set_dirty(CPUState
*env
,
2296 unsigned long addr
, target_ulong vaddr
)
2299 #endif /* defined(CONFIG_USER_ONLY) */
2301 #if !defined(CONFIG_USER_ONLY)
2303 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2304 ram_addr_t memory
, ram_addr_t region_offset
);
2305 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2306 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2307 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2310 if (addr > start_addr) \
2313 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2314 if (start_addr2 > 0) \
2318 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2319 end_addr2 = TARGET_PAGE_SIZE - 1; \
2321 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2322 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
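/* Usage sketch (illustrative, not part of the original file): board code
   normally goes through the cpu_register_physical_memory() wrapper, which
   calls the function above with a region_offset of 0.  The phys_offset
   argument carries a RAM offset obtained from qemu_ram_alloc() (low bits 0,
   i.e. IO_MEM_RAM) or an io index from cpu_register_io_memory().  The
   addresses and sizes below are arbitrary. */
#if 0
static void example_map_board_memory(void)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(0x2000000);                 /* 32 MB RAM */
    cpu_register_physical_memory(0x00000000, 0x2000000,
                                 ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(0x10000);                   /* 64 KB ROM */
    cpu_register_physical_memory(0xfffe0000, 0x10000,
                                 rom_offset | IO_MEM_ROM);
}
#endif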
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64
                ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
    unsigned long hpagesize;
    unsigned long flags;
    extern int mem_prealloc;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother checking for errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    ftruncate(fd, memory);

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("alloc_mem_area: can't mmap hugetlbfs pages");
        close(fd);
        return NULL;
    }
    return area;
}
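/* Worked example of the rounding above (illustrative): with a 2 MB huge page
   size (hpagesize = 0x200000), a request of memory = 0x250000 bytes becomes
   (0x250000 + 0x1fffff) & ~0x1fffff = 0x400000, i.e. two huge pages. */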
#else
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    return NULL;
}
#endif

extern const char *mem_path;
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = file_ram_alloc(size, mem_path);
    if (!new_block->host) {
        new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
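/* Worked example of the dirty-bitmap sizing above (illustrative): with
   TARGET_PAGE_BITS = 12 (4 KB target pages), allocating 128 MB of RAM grows
   phys_ram_dirty by 0x8000000 >> 12 = 32768 bytes, i.e. one flag byte per
   guest page, and each new byte is initialized to 0xff (dirty for every
   client, including the code-dirty tracker). */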
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
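/* Usage sketch (illustrative, not part of the original file): a display
   device that owns its video RAM block may cache the host pointer for its
   own accesses, while guest-driven DMA must go through
   cpu_physical_memory_rw()/cpu_physical_memory_map() as the comment above
   requires.  vram_offset and vram_size are hypothetical fields. */
#if 0
static void example_vga_draw(ram_addr_t vram_offset, unsigned vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);  /* device-local use: ok */
    /* ... read pixels from vram[0 .. vram_size - 1] ... */
}
#endif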
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
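/* Illustrative sketch (not part of the original file): the update performed
   by the writers above, in isolation.  Each guest page has one flag byte in
   phys_ram_dirty; a store through the notdirty path sets every flag except
   CODE_DIRTY_FLAG, and only once the byte reaches 0xff is the slow notdirty
   callback removed from the TLB via tlb_set_dirty(). */
#if 0
static void example_mark_page_dirty(ram_addr_t ram_addr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= 0xff & ~CODE_DIRTY_FLAG;
}
#endif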
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
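/* Usage sketch (illustrative, not part of the original file): watchpoints
   that end up in check_watchpoint() above are installed through
   cpu_watchpoint_insert().  The signature below is assumed from the same
   QEMU generation and may differ slightly. */
#if 0
static void example_watch_guest_address(CPUState *env, target_ulong addr)
{
    /* trap 4-byte-wide guest writes to 'addr' and hand control to the
       debug machinery */
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
}
#endif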
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
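/* Usage sketch (illustrative, not part of the original file): subpages are
   built automatically.  Registering a region that does not cover a whole
   target page, or whose io index reports IO_MEM_SUBWIDTH, makes
   cpu_register_physical_memory_offset() above create a subpage container for
   that page and route each sub-range through the tables filled in by
   subpage_register().  The io index below is hypothetical. */
#if 0
static void example_register_small_region(int example_io_index)
{
    /* 0x100 bytes of MMIO inside one 4 KB target page */
    cpu_register_physical_memory(0x90000000, 0x100, example_io_index);
}
#endif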
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc **mem_read,
                                        CPUWriteMemoryFunc **mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
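/* Usage sketch (illustrative, not part of the original file): a device model
   supplies one read and one write handler per access size, registers them,
   and maps the returned io index at a guest physical address.  All of the
   example_* names are hypothetical; leaving byte/word entries NULL makes the
   registration return IO_MEM_SUBWIDTH, which is handled via subpages. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* 'addr' is the byte offset inside the registered region */
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* react to a 32-bit guest store at byte offset 'addr' */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(void)
{
    int io = cpu_register_io_memory(example_dev_read, example_dev_write, NULL);
    cpu_register_physical_memory(0xf0000000, 0x1000, io);
}
#endif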
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);

    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                flush_icache_range((unsigned long)ptr,
                                   ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
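/* Usage sketch (illustrative, not part of the original file): the zero-copy
   DMA pattern built on the two functions above.  A device maps as much of
   the guest buffer as currently possible, transfers into it, and unmaps with
   the byte count actually written so the dirty bits and TB invalidation
   above are applied. */
#if 0
static void example_dma_read_from_device(target_phys_addr_t guest_addr,
                                         target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);
    if (!host) {
        return;  /* retry later, e.g. from a cpu_register_map_client() hook */
    }
    /* ... device fills host[0 .. plen - 1] ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif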
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
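/* Usage sketch (illustrative): the helpers above let device and machine code
   poke guest-physical memory in target byte order; a 32-bit value stored with
   stl_phys() reads back unchanged with ldl_phys().  The address is
   arbitrary. */
#if 0
static void example_phys_store_load(void)
{
    uint32_t v;

    stl_phys(0x1000, 0x12345678);   /* goes through RAM or an MMIO handler */
    v = ldl_phys(0x1000);           /* v == 0x12345678 for plain RAM */
    (void)v;
}
#endif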
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif