 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#if !defined(NO_CPU_EMULATION)
#include "tcg-target.h"
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_PHYS_ADDR_SPACE_BITS 32
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
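
/*
 * Editorial sketch (not in the original source): with this two-level layout,
 * a page index is split into an L1 index (upper L1_BITS) and an L2 index
 * (lower L2_BITS), i.e. roughly
 *
 *     PageDesc **lp = &l1_map[index >> L2_BITS];
 *     PageDesc  *p  = *lp + (index & (L2_SIZE - 1));
 *
 * which is exactly the lookup performed by page_find()/page_find_alloc()
 * further down in this file.
 */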
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
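
/*
 * Editorial note (inferred from subpage_readlen()/subpage_writelen() below,
 * not an original comment): a subpage_t splits a single target page among
 * several I/O handlers.  mem_read/mem_write are indexed first by the offset
 * inside the page (SUBPAGE_IDX) and then by the access size
 * (0 = byte, 1 = word, 2 = long); opaque[..][0][..] holds the read state and
 * opaque[..][1][..] the write state.
 */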
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
#else
    qemu_real_host_page_size = getpagesize();
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
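
    /*
     * Editorial example (not in the original source): if both the host and
     * target pages are 4 KiB, the loop above leaves qemu_host_page_bits == 12
     * and qemu_host_page_mask == ~0xfff, so host-page rounding elsewhere in
     * this file is a simple "addr & qemu_host_page_mask".
     */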
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
        long long startaddr, endaddr;

        f = fopen("/proc/self/maps", "r");
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc *page_find_alloc(unsigned int index)
    lp = &l1_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(unsigned int index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
        /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
void cpu_exec_init(CPUState *env)
        code_gen_ptr = code_gen_buffer;
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
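
/*
 * Editorial note (inferred from the code, not an original comment): the TB
 * lists traversed in this file store a small tag in the two low bits of each
 * pointer, e.g. p->first_tb = (TranslationBlock *)((long)tb | n) and
 * tb->jmp_first = (TranslationBlock *)((long)tb | 2), which is why every
 * list walk first recovers the real pointer with
 * "(TranslationBlock *)((long)tb1 & ~3)".
 */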
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
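
/*
 * Editorial sketch (not in the original source): the bitmap built above keeps
 * one bit per byte of the target page, so checking whether a write of 'len'
 * bytes at page offset 'offset' touches translated code reduces to
 *
 *     b = p->code_bitmap[offset >> 3] >> (offset & 7);
 *     if (b & ((1 << len) - 1))
 *         ...some TB overlaps the written bytes...
 *
 * which is the test performed by tb_invalidate_phys_page_fast() below.
 */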
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
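
/*
 * Editorial example (assumption: CODE_GEN_ALIGN is a power of two, e.g. 16):
 * the rounding above moves code_gen_ptr to the next aligned boundary, so with
 * a 16-byte alignment a generated block of 70 bytes advances the pointer by
 * 80 bytes.
 */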
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
688 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
689 CPUState
*env
= cpu_single_env
;
691 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
692 target_ulong tb_start
, tb_end
;
693 target_ulong current_pc
, current_cs_base
;
695 p
= page_find(start
>> TARGET_PAGE_BITS
);
698 if (!p
->code_bitmap
&&
699 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
700 is_cpu_write_access
) {
701 /* build code bitmap */
702 build_page_bitmap(p
);
705 /* we remove all the TBs in the range [start, end[ */
706 /* XXX: see if in some cases it could be faster to invalidate all the code */
707 current_tb_not_found
= is_cpu_write_access
;
708 current_tb_modified
= 0;
709 current_tb
= NULL
; /* avoid warning */
710 current_pc
= 0; /* avoid warning */
711 current_cs_base
= 0; /* avoid warning */
712 current_flags
= 0; /* avoid warning */
716 tb
= (TranslationBlock
*)((long)tb
& ~3);
717 tb_next
= tb
->page_next
[n
];
718 /* NOTE: this is subtle as a TB may span two physical pages */
720 /* NOTE: tb_end may be after the end of the page, but
721 it is not a problem */
722 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
723 tb_end
= tb_start
+ tb
->size
;
725 tb_start
= tb
->page_addr
[1];
726 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
728 if (!(tb_end
<= start
|| tb_start
>= end
)) {
729 #ifdef TARGET_HAS_PRECISE_SMC
730 if (current_tb_not_found
) {
731 current_tb_not_found
= 0;
733 if (env
->mem_write_pc
) {
734 /* now we have a real cpu fault */
735 current_tb
= tb_find_pc(env
->mem_write_pc
);
738 if (current_tb
== tb
&&
739 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
740 /* If we are modifying the current TB, we must stop
741 its execution. We could be more precise by checking
742 that the modification is after the current PC, but it
743 would require a specialized function to partially
744 restore the CPU state */
746 current_tb_modified
= 1;
747 cpu_restore_state(current_tb
, env
,
748 env
->mem_write_pc
, NULL
);
749 #if defined(TARGET_I386)
750 current_flags
= env
->hflags
;
751 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
752 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
753 current_pc
= current_cs_base
+ env
->eip
;
755 #error unsupported CPU
758 #endif /* TARGET_HAS_PRECISE_SMC */
759 /* we need to do that to handle the case where a signal
760 occurs while doing tb_phys_invalidate() */
763 saved_tb
= env
->current_tb
;
764 env
->current_tb
= NULL
;
766 tb_phys_invalidate(tb
, -1);
768 env
->current_tb
= saved_tb
;
769 if (env
->interrupt_request
&& env
->current_tb
)
770 cpu_interrupt(env
, env
->interrupt_request
);
775 #if !defined(CONFIG_USER_ONLY)
776 /* if no code remaining, no need to continue to use slow writes */
778 invalidate_page_bitmap(p
);
779 if (is_cpu_write_access
) {
780 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
784 #ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb_modified
) {
786 /* we generate a block containing just the instruction
787 modifying the memory. It will ensure that it cannot modify
789 env
->current_tb
= NULL
;
790 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
792 cpu_resume_from_signal(env
, NULL
);
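
/*
 * Editorial note (not in the original source): a minimal use of the routine
 * above is invalidating the code around a single byte, e.g.
 *
 *     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
 *
 * which is what breakpoint_invalidate() does further down in this file.
 */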
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
826 #if !defined(CONFIG_SOFTMMU)
827 static void tb_invalidate_phys_page(target_ulong addr
,
828 unsigned long pc
, void *puc
)
830 int n
, current_flags
, current_tb_modified
;
831 target_ulong current_pc
, current_cs_base
;
833 TranslationBlock
*tb
, *current_tb
;
834 #ifdef TARGET_HAS_PRECISE_SMC
835 CPUState
*env
= cpu_single_env
;
838 addr
&= TARGET_PAGE_MASK
;
839 p
= page_find(addr
>> TARGET_PAGE_BITS
);
843 current_tb_modified
= 0;
845 current_pc
= 0; /* avoid warning */
846 current_cs_base
= 0; /* avoid warning */
847 current_flags
= 0; /* avoid warning */
848 #ifdef TARGET_HAS_PRECISE_SMC
850 current_tb
= tb_find_pc(pc
);
855 tb
= (TranslationBlock
*)((long)tb
& ~3);
856 #ifdef TARGET_HAS_PRECISE_SMC
857 if (current_tb
== tb
&&
858 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
859 /* If we are modifying the current TB, we must stop
860 its execution. We could be more precise by checking
861 that the modification is after the current PC, but it
862 would require a specialized function to partially
863 restore the CPU state */
865 current_tb_modified
= 1;
866 cpu_restore_state(current_tb
, env
, pc
, puc
);
867 #if defined(TARGET_I386)
868 current_flags
= env
->hflags
;
869 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
870 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
871 current_pc
= current_cs_base
+ env
->eip
;
873 #error unsupported CPU
876 #endif /* TARGET_HAS_PRECISE_SMC */
877 tb_phys_invalidate(tb
, addr
);
878 tb
= tb
->page_next
[n
];
881 #ifdef TARGET_HAS_PRECISE_SMC
882 if (current_tb_modified
) {
883 /* we generate a block containing just the instruction
884 modifying the memory. It will ensure that it cannot modify
886 env
->current_tb
= NULL
;
887 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
889 cpu_resume_from_signal(env
, puc
);
/* add the TB to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
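
/*
 * Editorial note (inferred from the surrounding code): once a page holds
 * translated code it is write-protected here (mprotect() in user mode,
 * tlb_protect_code() in system mode).  A later guest write faults or takes
 * the notdirty slow path, page_unprotect()/the notdirty handlers invalidate
 * the affected TBs, and only then is the write allowed to complete.
 */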
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
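
/*
 * Editorial note (inferred): the binary search above relies on tbs[] being
 * filled in increasing tc_ptr order, which holds because code_gen_ptr only
 * grows between two tb_flush() calls.
 */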
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
1079 #if defined(TARGET_HAS_ICE)
1080 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1082 target_phys_addr_t addr
;
1084 ram_addr_t ram_addr
;
1087 addr
= cpu_get_phys_page_debug(env
, pc
);
1088 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1090 pd
= IO_MEM_UNASSIGNED
;
1092 pd
= p
->phys_offset
;
1094 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1095 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1099 /* Add a watchpoint. */
1100 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1104 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1105 if (addr
== env
->watchpoint
[i
].vaddr
)
1108 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1111 i
= env
->nb_watchpoints
++;
1112 env
->watchpoint
[i
].vaddr
= addr
;
1113 tlb_flush_page(env
, addr
);
1114 /* FIXME: This flush is needed because of the hack to make memory ops
1115 terminate the TB. It can be removed once the proper IO trap and
1116 re-execute bits are in. */
1121 /* Remove a watchpoint. */
1122 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1126 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1127 if (addr
== env
->watchpoint
[i
].vaddr
) {
1128 env
->nb_watchpoints
--;
1129 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1130 tlb_flush_page(env
, addr
);
1137 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1138 breakpoint is reached */
1139 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1141 #if defined(TARGET_HAS_ICE)
1144 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1145 if (env
->breakpoints
[i
] == pc
)
1149 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1151 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1154 kvm_update_debugger(env
);
1156 breakpoint_invalidate(env
, pc
);
1163 /* remove a breakpoint */
1164 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1166 #if defined(TARGET_HAS_ICE)
1168 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1169 if (env
->breakpoints
[i
] == pc
)
1174 env
->nb_breakpoints
--;
1175 if (i
< env
->nb_breakpoints
)
1176 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1179 kvm_update_debugger(env
);
1181 breakpoint_invalidate(env
, pc
);
1188 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1189 CPU loop after each instruction */
1190 void cpu_single_step(CPUState
*env
, int enabled
)
1192 #if defined(TARGET_HAS_ICE)
1193 if (env
->singlestep_enabled
!= enabled
) {
1194 env
->singlestep_enabled
= enabled
;
        /* must flush all the translated code to avoid inconsistencies */
1196 /* XXX: only flush what is necessary */
1200 kvm_update_debugger(env
);
1204 /* enable or disable low levels log */
1205 void cpu_set_log(int log_flags
)
1207 loglevel
= log_flags
;
1208 if (loglevel
&& !logfile
) {
1209 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1211 perror(logfilename
);
1214 #if !defined(CONFIG_SOFTMMU)
1215 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1217 static uint8_t logfile_buf
[4096];
1218 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1221 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1225 if (!loglevel
&& logfile
) {
1231 void cpu_set_log_filename(const char *filename
)
1233 logfilename
= strdup(filename
);
1238 cpu_set_log(loglevel
);
1241 /* mask must never be zero, except for A20 change call */
1242 void cpu_interrupt(CPUState
*env
, int mask
)
1244 TranslationBlock
*tb
;
1245 static int interrupt_lock
;
1247 env
->interrupt_request
|= mask
;
1248 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1249 kvm_update_interrupt_request(env
);
1251 /* if the cpu is currently executing code, we must unlink it and
1252 all the potentially executing TB */
1253 tb
= env
->current_tb
;
1254 if (tb
&& !testandset(&interrupt_lock
)) {
1255 env
->current_tb
= NULL
;
1256 tb_reset_jump_recursive(tb
);
1261 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1263 env
->interrupt_request
&= ~mask
;
1266 CPULogItem cpu_log_items
[] = {
1267 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1268 "show generated host assembly code for each compiled TB" },
1269 { CPU_LOG_TB_IN_ASM
, "in_asm",
1270 "show target assembly code for each compiled TB" },
1271 { CPU_LOG_TB_OP
, "op",
1272 "show micro ops for each compiled TB" },
1274 { CPU_LOG_TB_OP_OPT
, "op_opt",
1275 "show micro ops before eflags optimization" },
1277 { CPU_LOG_INT
, "int",
1278 "show interrupts/exceptions in short format" },
1279 { CPU_LOG_EXEC
, "exec",
1280 "show trace before each executed TB (lots of logs)" },
1281 { CPU_LOG_TB_CPU
, "cpu",
1282 "show CPU state before block translation" },
1284 { CPU_LOG_PCALL
, "pcall",
1285 "show protected mode far calls/returns/exceptions" },
1288 { CPU_LOG_IOPORT
, "ioport",
1289 "show all i/o ports accesses" },
1294 static int cmp1(const char *s1
, int n
, const char *s2
)
1296 if (strlen(s2
) != n
)
1298 return memcmp(s1
, s2
, n
) == 0;
1301 /* takes a comma separated list of log masks. Return 0 if error. */
1302 int cpu_str_to_log_mask(const char *str
)
1311 p1
= strchr(p
, ',');
1314 if(cmp1(p
,p1
-p
,"all")) {
1315 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1319 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1320 if (cmp1(p
, p1
- p
, item
->name
))
1334 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1341 fprintf(stderr
, "qemu: fatal: ");
1342 vfprintf(stderr
, fmt
, ap
);
1343 fprintf(stderr
, "\n");
1345 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1346 /* most probably the virtual machine should not
1347 be shut down but rather caught by the VMM */
1348 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1350 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1352 cpu_dump_state(env
, stderr
, fprintf
, 0);
1355 fprintf(logfile
, "qemu: fatal: ");
1356 vfprintf(logfile
, fmt
, ap2
);
1357 fprintf(logfile
, "\n");
1359 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1361 cpu_dump_state(env
, logfile
, fprintf
, 0);
1371 CPUState
*cpu_copy(CPUState
*env
)
1373 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1374 /* preserve chaining and index */
1375 CPUState
*next_cpu
= new_env
->next_cpu
;
1376 int cpu_index
= new_env
->cpu_index
;
1377 memcpy(new_env
, env
, sizeof(CPUState
));
1378 new_env
->next_cpu
= next_cpu
;
1379 new_env
->cpu_index
= cpu_index
;
1383 #if !defined(CONFIG_USER_ONLY)
1385 /* NOTE: if flush_global is true, also flush global entries (not
1387 void tlb_flush(CPUState
*env
, int flush_global
)
1391 #if defined(DEBUG_TLB)
1392 printf("tlb_flush:\n");
1394 /* must reset current TB so that interrupts cannot modify the
1395 links while we are modifying them */
1396 env
->current_tb
= NULL
;
1398 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1399 env
->tlb_table
[0][i
].addr_read
= -1;
1400 env
->tlb_table
[0][i
].addr_write
= -1;
1401 env
->tlb_table
[0][i
].addr_code
= -1;
1402 env
->tlb_table
[1][i
].addr_read
= -1;
1403 env
->tlb_table
[1][i
].addr_write
= -1;
1404 env
->tlb_table
[1][i
].addr_code
= -1;
1405 #if (NB_MMU_MODES >= 3)
1406 env
->tlb_table
[2][i
].addr_read
= -1;
1407 env
->tlb_table
[2][i
].addr_write
= -1;
1408 env
->tlb_table
[2][i
].addr_code
= -1;
1409 #if (NB_MMU_MODES == 4)
1410 env
->tlb_table
[3][i
].addr_read
= -1;
1411 env
->tlb_table
[3][i
].addr_write
= -1;
1412 env
->tlb_table
[3][i
].addr_code
= -1;
1417 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1419 #if !defined(CONFIG_SOFTMMU)
1420 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1423 if (env
->kqemu_enabled
) {
1424 kqemu_flush(env
, flush_global
);
1430 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1432 if (addr
== (tlb_entry
->addr_read
&
1433 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1434 addr
== (tlb_entry
->addr_write
&
1435 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1436 addr
== (tlb_entry
->addr_code
&
1437 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1438 tlb_entry
->addr_read
= -1;
1439 tlb_entry
->addr_write
= -1;
1440 tlb_entry
->addr_code
= -1;
1444 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1447 TranslationBlock
*tb
;
1449 #if defined(DEBUG_TLB)
1450 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1452 /* must reset current TB so that interrupts cannot modify the
1453 links while we are modifying them */
1454 env
->current_tb
= NULL
;
1456 addr
&= TARGET_PAGE_MASK
;
1457 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1458 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1459 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1460 #if (NB_MMU_MODES >= 3)
1461 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1462 #if (NB_MMU_MODES == 4)
1463 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1467 /* Discard jump cache entries for any tb which might potentially
1468 overlap the flushed page. */
1469 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1470 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1472 i
= tb_jmp_cache_hash_page(addr
);
1473 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1475 #if !defined(CONFIG_SOFTMMU)
1476 if (addr
< MMAP_AREA_END
)
1477 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1480 if (env
->kqemu_enabled
) {
1481 kqemu_flush_page(env
, addr
);
1486 /* update the TLBs so that writes to code in the virtual page 'addr'
1488 static void tlb_protect_code(ram_addr_t ram_addr
)
1490 cpu_physical_memory_reset_dirty(ram_addr
,
1491 ram_addr
+ TARGET_PAGE_SIZE
,
1495 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1496 tested for self modifying code */
1497 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1500 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
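
/*
 * Editorial note (inferred from the code): phys_ram_dirty keeps one byte of
 * dirty flags per target page; 0xff means fully dirty, and CODE_DIRTY_FLAG is
 * the bit that tells the write fast path that no translated code needs to be
 * invalidated for that page.
 */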
1503 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1504 unsigned long start
, unsigned long length
)
1507 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1508 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1509 if ((addr
- start
) < length
) {
1510 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1515 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1519 unsigned long length
, start1
;
1523 start
&= TARGET_PAGE_MASK
;
1524 end
= TARGET_PAGE_ALIGN(end
);
1526 length
= end
- start
;
1529 len
= length
>> TARGET_PAGE_BITS
;
1531 /* XXX: should not depend on cpu context */
1533 if (env
->kqemu_enabled
) {
1536 for(i
= 0; i
< len
; i
++) {
1537 kqemu_set_notdirty(env
, addr
);
1538 addr
+= TARGET_PAGE_SIZE
;
1542 mask
= ~dirty_flags
;
1543 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1544 for(i
= 0; i
< len
; i
++)
1547 /* we modify the TLB cache so that the dirty bit will be set again
1548 when accessing the range */
1549 start1
= start
+ (unsigned long)phys_ram_base
;
1550 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1551 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1552 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1553 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1554 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1555 #if (NB_MMU_MODES >= 3)
1556 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1557 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1558 #if (NB_MMU_MODES == 4)
1559 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1560 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1565 #if !defined(CONFIG_SOFTMMU)
1566 /* XXX: this is expensive */
1572 for(i
= 0; i
< L1_SIZE
; i
++) {
1575 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1576 for(j
= 0; j
< L2_SIZE
; j
++) {
1577 if (p
->valid_tag
== virt_valid_tag
&&
1578 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1579 (p
->prot
& PROT_WRITE
)) {
1580 if (addr
< MMAP_AREA_END
) {
1581 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1582 p
->prot
& ~PROT_WRITE
);
1585 addr
+= TARGET_PAGE_SIZE
;
1594 int cpu_physical_memory_set_dirty_tracking(int enable
)
1599 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1600 in_migration
= enable
;
1604 int cpu_physical_memory_get_dirty_tracking(void)
1606 return in_migration
;
1609 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1611 ram_addr_t ram_addr
;
1613 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1614 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1615 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1616 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1617 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1622 /* update the TLB according to the current state of the dirty bits */
1623 void cpu_tlb_update_dirty(CPUState
*env
)
1626 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1627 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1628 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1629 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1630 #if (NB_MMU_MODES >= 3)
1631 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1632 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1633 #if (NB_MMU_MODES == 4)
1634 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1635 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1640 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1641 unsigned long start
)
1644 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1645 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1646 if (addr
== start
) {
1647 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1652 /* update the TLB corresponding to virtual page vaddr and phys addr
1653 addr so that it is no longer dirty */
1654 static inline void tlb_set_dirty(CPUState
*env
,
1655 unsigned long addr
, target_ulong vaddr
)
1659 addr
&= TARGET_PAGE_MASK
;
1660 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1661 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1662 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1663 #if (NB_MMU_MODES >= 3)
1664 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1665 #if (NB_MMU_MODES == 4)
1666 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
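
/*
 * Editorial example (not in the original source): a TLB slot is always
 * selected the same way in this file, e.g.
 *
 *     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 *
 * so tlb_flush_page(), tlb_set_dirty() and tlb_set_page_exec() all touch the
 * same entry for a given virtual page.
 */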
1671 /* add a new TLB entry. At most one entry for a given virtual address
1672 is permitted. Return 0 if OK or 2 if the page could not be mapped
1673 (can only happen in non SOFTMMU mode for I/O pages or pages
1674 conflicting with the host address space). */
1675 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1676 target_phys_addr_t paddr
, int prot
,
1677 int mmu_idx
, int is_softmmu
)
1682 target_ulong address
;
1683 target_phys_addr_t addend
;
1688 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1690 pd
= IO_MEM_UNASSIGNED
;
1692 pd
= p
->phys_offset
;
1694 #if defined(DEBUG_TLB)
1695 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1696 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1700 #if !defined(CONFIG_SOFTMMU)
1704 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1705 /* IO memory case */
1706 address
= vaddr
| pd
;
1709 /* standard memory */
1711 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1714 /* Make accesses to pages with watchpoints go via the
1715 watchpoint trap routines. */
1716 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1717 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1718 if (address
& ~TARGET_PAGE_MASK
) {
1719 env
->watchpoint
[i
].addend
= 0;
1720 address
= vaddr
| io_mem_watch
;
1722 env
->watchpoint
[i
].addend
= pd
- paddr
+
1723 (unsigned long) phys_ram_base
;
1724 /* TODO: Figure out how to make read watchpoints coexist
1726 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1731 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1733 te
= &env
->tlb_table
[mmu_idx
][index
];
1734 te
->addend
= addend
;
1735 if (prot
& PAGE_READ
) {
1736 te
->addr_read
= address
;
1740 if (prot
& PAGE_EXEC
) {
1741 te
->addr_code
= address
;
1745 if (prot
& PAGE_WRITE
) {
1746 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1747 (pd
& IO_MEM_ROMD
)) {
1748 /* write access calls the I/O callback */
1749 te
->addr_write
= vaddr
|
1750 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1751 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1752 !cpu_physical_memory_is_dirty(pd
)) {
1753 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1755 te
->addr_write
= address
;
1758 te
->addr_write
= -1;
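
    /*
     * Editorial note (inferred from the code above): the low bits of
     * addr_read/addr_write/addr_code double as an I/O index.  Plain RAM keeps
     * them clear, IO_MEM_NOTDIRTY marks RAM whose dirty bits are not yet set,
     * and ROM or MMIO pages route accesses through the registered callbacks
     * instead of a direct host load/store.
     */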
1761 #if !defined(CONFIG_SOFTMMU)
1763 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1764 /* IO access: no mapping is done as it will be handled by the
1766 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1771 if (vaddr
>= MMAP_AREA_END
) {
1774 if (prot
& PROT_WRITE
) {
1775 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1776 #if defined(TARGET_HAS_SMC) || 1
1779 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1780 !cpu_physical_memory_is_dirty(pd
))) {
1781 /* ROM: we do as if code was inside */
1782 /* if code is present, we only map as read only and save the
1786 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1789 vp
->valid_tag
= virt_valid_tag
;
1790 prot
&= ~PAGE_WRITE
;
1793 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1794 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1795 if (map_addr
== MAP_FAILED
) {
1796 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1808 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1810 #if !defined(CONFIG_SOFTMMU)
1813 #if defined(DEBUG_TLB)
1814 printf("page_unprotect: addr=0x%08x\n", addr
);
1816 addr
&= TARGET_PAGE_MASK
;
1818 /* if it is not mapped, no need to worry here */
1819 if (addr
>= MMAP_AREA_END
)
1821 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1824 /* NOTE: in this case, validate_tag is _not_ tested as it
1825 validates only the code TLB */
1826 if (vp
->valid_tag
!= virt_valid_tag
)
1828 if (!(vp
->prot
& PAGE_WRITE
))
1830 #if defined(DEBUG_TLB)
1831 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1832 addr
, vp
->phys_addr
, vp
->prot
);
1834 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1835 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1836 (unsigned long)addr
, vp
->prot
);
1837 /* set the dirty bit */
1838 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1839 /* flush the code inside */
1840 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1849 void tlb_flush(CPUState
*env
, int flush_global
)
1853 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1857 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1858 target_phys_addr_t paddr
, int prot
,
1859 int mmu_idx
, int is_softmmu
)
1864 /* dump memory mappings */
1865 void page_dump(FILE *f
)
1867 unsigned long start
, end
;
1868 int i
, j
, prot
, prot1
;
1871 fprintf(f
, "%-8s %-8s %-8s %s\n",
1872 "start", "end", "size", "prot");
1876 for(i
= 0; i
<= L1_SIZE
; i
++) {
1881 for(j
= 0;j
< L2_SIZE
; j
++) {
1886 if (prot1
!= prot
) {
1887 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1889 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1890 start
, end
, end
- start
,
1891 prot
& PAGE_READ
? 'r' : '-',
1892 prot
& PAGE_WRITE
? 'w' : '-',
1893 prot
& PAGE_EXEC
? 'x' : '-');
1907 int page_get_flags(target_ulong address
)
1911 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
1920 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1925 start
= start
& TARGET_PAGE_MASK
;
1926 end
= TARGET_PAGE_ALIGN(end
);
1927 if (flags
& PAGE_WRITE
)
1928 flags
|= PAGE_WRITE_ORG
;
1929 spin_lock(&tb_lock
);
1930 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1931 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1932 /* if the write protection is set, then we invalidate the code
1934 if (!(p
->flags
& PAGE_WRITE
) &&
1935 (flags
& PAGE_WRITE
) &&
1937 tb_invalidate_phys_page(addr
, 0, NULL
);
1941 spin_unlock(&tb_lock
);
int page_check_range(target_ulong start, target_ulong len, int flags)
    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
1954 /* we've wrapped around */
1956 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1957 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1960 if( !(p
->flags
& PAGE_VALID
) )
1963 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1965 if (flags
& PAGE_WRITE
) {
1966 if (!(p
->flags
& PAGE_WRITE_ORG
))
1968 /* unprotect the page if it was put read-only because it
1969 contains translated code */
1970 if (!(p
->flags
& PAGE_WRITE
)) {
1971 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1982 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1984 unsigned int page_index
, prot
, pindex
;
1986 target_ulong host_start
, host_end
, addr
;
1988 host_start
= address
& qemu_host_page_mask
;
1989 page_index
= host_start
>> TARGET_PAGE_BITS
;
1990 p1
= page_find(page_index
);
1993 host_end
= host_start
+ qemu_host_page_size
;
1996 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2000 /* if the page was really writable, then we change its
2001 protection back to writable */
2002 if (prot
& PAGE_WRITE_ORG
) {
2003 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2004 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2005 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2006 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2007 p1
[pindex
].flags
|= PAGE_WRITE
;
2008 /* and since the content will be modified, we must invalidate
2009 the corresponding translated code. */
2010 tb_invalidate_phys_page(address
, pc
, puc
);
2011 #ifdef DEBUG_TB_CHECK
2012 tb_invalidate_check(address
);
2020 static inline void tlb_set_dirty(CPUState
*env
,
2021 unsigned long addr
, target_ulong vaddr
)
2024 #endif /* defined(CONFIG_USER_ONLY) */
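
/*
 * Editorial note (inferred): because write protection is applied with
 * host-page granularity, page_unprotect() above re-enables PAGE_WRITE for the
 * whole host page and therefore walks every target page inside
 * [host_start, host_end) before calling mprotect().
 */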
2026 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2028 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2030 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2033 if (addr > start_addr) \
2036 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2037 if (start_addr2 > 0) \
2041 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2042 end_addr2 = TARGET_PAGE_SIZE - 1; \
2044 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2045 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
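
/*
 * Editorial note (inferred from the macro above): need_subpage ends up set
 * whenever the registered region does not start or end exactly on a target
 * page boundary, in which case cpu_register_physical_memory() below builds a
 * subpage_t instead of mapping the whole page to a single handler.
 */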
2050 /* register physical memory. 'size' must be a multiple of the target
2051 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2053 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2055 unsigned long phys_offset
)
2057 target_phys_addr_t addr
, end_addr
;
2060 unsigned long orig_size
= size
;
2063 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2064 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2065 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2066 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2067 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2068 unsigned long orig_memory
= p
->phys_offset
;
2069 target_phys_addr_t start_addr2
, end_addr2
;
2070 int need_subpage
= 0;
2072 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2074 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2075 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2076 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2077 &p
->phys_offset
, orig_memory
);
2079 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2082 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2084 p
->phys_offset
= phys_offset
;
2085 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2086 (phys_offset
& IO_MEM_ROMD
))
2087 phys_offset
+= TARGET_PAGE_SIZE
;
2090 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2091 p
->phys_offset
= phys_offset
;
2092 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2093 (phys_offset
& IO_MEM_ROMD
))
2094 phys_offset
+= TARGET_PAGE_SIZE
;
2096 target_phys_addr_t start_addr2
, end_addr2
;
2097 int need_subpage
= 0;
2099 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2100 end_addr2
, need_subpage
);
2102 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2103 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2104 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2105 subpage_register(subpage
, start_addr2
, end_addr2
,
2112 /* since each CPU stores ram addresses in its TLB cache, we must
2113 reset the modified entries */
2115 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2120 /* XXX: temporary until new memory mapping API */
2121 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2125 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2127 return IO_MEM_UNASSIGNED
;
2128 return p
->phys_offset
;
2131 /* XXX: better than nothing */
2132 ram_addr_t
qemu_ram_alloc(unsigned long size
)
2135 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2136 fprintf(stderr
, "Not enough memory (requested_size = %lu, max memory = %d)\n",
2137 size
, phys_ram_size
);
2140 addr
= phys_ram_alloc_offset
;
2141 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2145 void qemu_ram_free(ram_addr_t addr
)
2149 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2151 #ifdef DEBUG_UNASSIGNED
2152 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2155 do_unassigned_access(addr
, 0, 0, 0);
2157 do_unassigned_access(addr
, 0, 0, 0);
2162 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2164 #ifdef DEBUG_UNASSIGNED
2165 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2168 do_unassigned_access(addr
, 1, 0, 0);
2170 do_unassigned_access(addr
, 1, 0, 0);
2174 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2175 unassigned_mem_readb
,
2176 unassigned_mem_readb
,
2177 unassigned_mem_readb
,
2180 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2181 unassigned_mem_writeb
,
2182 unassigned_mem_writeb
,
2183 unassigned_mem_writeb
,
2186 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2188 unsigned long ram_addr
;
2190 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2191 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2192 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2193 #if !defined(CONFIG_USER_ONLY)
2194 tb_invalidate_phys_page_fast(ram_addr
, 1);
2195 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2198 stb_p((uint8_t *)(long)addr
, val
);
2200 if (cpu_single_env
->kqemu_enabled
&&
2201 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2202 kqemu_modify_page(cpu_single_env
, ram_addr
);
2204 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2205 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2206 /* we remove the notdirty callback only if the code has been
2208 if (dirty_flags
== 0xff)
2209 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2212 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2214 unsigned long ram_addr
;
2216 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2217 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2218 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2219 #if !defined(CONFIG_USER_ONLY)
2220 tb_invalidate_phys_page_fast(ram_addr
, 2);
2221 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2224 stw_p((uint8_t *)(long)addr
, val
);
2226 if (cpu_single_env
->kqemu_enabled
&&
2227 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2228 kqemu_modify_page(cpu_single_env
, ram_addr
);
2230 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2231 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2232 /* we remove the notdirty callback only if the code has been
2234 if (dirty_flags
== 0xff)
2235 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2238 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2240 unsigned long ram_addr
;
2242 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2243 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2244 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2245 #if !defined(CONFIG_USER_ONLY)
2246 tb_invalidate_phys_page_fast(ram_addr
, 4);
2247 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2250 stl_p((uint8_t *)(long)addr
, val
);
2252 if (cpu_single_env
->kqemu_enabled
&&
2253 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2254 kqemu_modify_page(cpu_single_env
, ram_addr
);
2256 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2257 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2258 /* we remove the notdirty callback only if the code has been
2260 if (dirty_flags
== 0xff)
2261 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2264 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2265 NULL
, /* never used */
2266 NULL
, /* never used */
2267 NULL
, /* never used */
2270 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2271 notdirty_mem_writeb
,
2272 notdirty_mem_writew
,
2273 notdirty_mem_writel
,
2276 #if defined(CONFIG_SOFTMMU)
2277 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2278 so these check for a hit then pass through to the normal out-of-line
2280 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2282 return ldub_phys(addr
);
2285 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2287 return lduw_phys(addr
);
2290 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2292 return ldl_phys(addr
);
2295 /* Generate a debug exception if a watchpoint has been hit.
2296 Returns the real physical address of the access. addr will be a host
2297 address in case of a RAM location. */
2298 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2300 CPUState
*env
= cpu_single_env
;
2302 target_ulong retaddr
;
2306 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2307 watch
= env
->watchpoint
[i
].vaddr
;
2308 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2309 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2310 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2311 cpu_single_env
->watchpoint_hit
= i
+ 1;
2312 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2320 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2323 addr
= check_watchpoint(addr
);
2324 stb_phys(addr
, val
);
2327 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2330 addr
= check_watchpoint(addr
);
2331 stw_phys(addr
, val
);
2334 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2337 addr
= check_watchpoint(addr
);
2338 stl_phys(addr
, val
);
2341 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2347 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is
   modified. If it is zero or negative, a new io zone is allocated. The
   return value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
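
/* Illustrative sketch (not part of the original file): registering a
   hypothetical MMIO region with cpu_register_io_memory() and mapping it
   into the physical address space, as described in the comment above.
   The my_mmio_* callbacks, MY_MMIO_BASE and the opaque device state are
   assumptions made up for the example. */
#if 0
static CPUReadMemoryFunc *my_mmio_read[3] = {
    my_mmio_readb, my_mmio_readw, my_mmio_readl,
};
static CPUWriteMemoryFunc *my_mmio_write[3] = {
    my_mmio_writeb, my_mmio_writew, my_mmio_writel,
};

static void example_map_my_mmio(void *dev_state)
{
    /* io_index == 0 asks for a new io zone; the return value already has
       IO_MEM_SHIFT applied, so it can be passed straight to
       cpu_register_physical_memory(). */
    int io = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, dev_state);
    cpu_register_physical_memory(MY_MMIO_BASE, TARGET_PAGE_SIZE, io);
}
#endif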
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does,
                   therefore flush the instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr) + l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
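
/* Illustrative sketch (not part of the original file): the typical caller
   of cpu_physical_memory_write_rom() is firmware loading code that must be
   able to store into pages already registered as IO_MEM_ROM.  BIOS_BASE and
   the buffer handling are assumptions made up for the example. */
#if 0
static void example_load_bios(const uint8_t *bios_data, int bios_size)
{
    /* Unlike cpu_physical_memory_write(), this also stores into ROM pages. */
    cpu_physical_memory_write_rom(BIOS_BASE, bios_data, bios_size);
}
#endif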
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
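
/* Illustrative sketch (not part of the original file): the intended user of
   stl_phys_notdirty() is target MMU code that updates guest page table
   entries (e.g. accessed/dirty bits) without flagging the page table page
   itself as dirty RAM.  example_set_pte_flag() and PTE_FLAG_ACCESSED are
   assumptions made up for the example. */
#if 0
static void example_set_pte_flag(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & PTE_FLAG_ACCESSED)) {
        pte |= PTE_FLAG_ACCESSED;
        /* The store bypasses the dirty-bit/SMC bookkeeping on purpose. */
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif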
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, " gen_code time %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, " avg cycles %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif