/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break. */
ram_addr_t last_ram_offset;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
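
/* Added illustration (not part of the original code): the page descriptors
 * form a two-level table.  A target page index is split into an L1 part
 * (top L1_BITS bits) and an L2 part (low L2_BITS bits), so a lookup is
 * roughly:
 *
 *     PageDesc *p = l1_map[index >> L2_BITS];      // first level
 *     if (p)
 *         p += index & (L2_SIZE - 1);              // second level entry
 *
 * This is only a sketch of the indexing arithmetic; the real helpers below
 * (page_l1_map(), page_find_alloc(), page_find()) also handle allocation and
 * the bounds check for 64-bit targets.
 */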
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
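
/* Added note (not in the original): map_exec() simply makes an existing
 * allocation executable; it is used below on the translation buffer and on
 * the generated-code prologue, e.g.
 *
 *     map_exec(code_gen_buffer, code_gen_buffer_size);
 *     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
 *
 * Both calls appear verbatim in code_gen_alloc() further down.
 */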
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    long long startaddr, endaddr;

    last_brk = (unsigned long)sbrk(0);
    f = fopen("/proc/self/maps", "r");
    n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
    startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    endaddr = MIN(endaddr,
                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    page_set_flags(startaddr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
    size_t len = sizeof(PageDesc) * L2_SIZE;
    /* Don't use qemu_malloc because it may recurse.  */
    p = mmap(0, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned long addr = h2g(p);
    page_set_flags(addr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(addr + len),
#else
    p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
#endif
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    lp = page_l1_map(index);
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));

    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);
#endif

    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));

    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
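
/* Added note (not in the original): phys_page_find() is the read-only
 * lookup; callers that get NULL back treat the page as unassigned I/O, e.g.
 *
 *     PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
 *     ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
 *
 * The same pattern appears in breakpoint_invalidate() and
 * tlb_set_page_exec() below.
 */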
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
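
/* Added note (not in the original): code_gen_buffer_max_size is the flush
 * threshold, not the allocation size -- one maximum-sized block is kept in
 * reserve so a translation that starts just below the threshold can never
 * overrun the buffer.  With the 32 MB default and, purely for illustration,
 * a 64 KB worst-case block from code_gen_max_block_size(), the threshold
 * would sit at 32 MB - 64 KB.
 */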
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    cpu_synchronize_state(env, 1);
}

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)
void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
#endif
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
#if defined(CONFIG_USER_ONLY)
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
{
    TranslationBlock *tb1;

        *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
        mask = 0xff << (start & 7);
        if ((start & ~7) == (end & ~7)) {
                mask &= ~(0xff << (end & 7));
        }
            start = (start + 8) & ~7;
            while (start < end1) {
            }
            mask = ~(0xff << (end & 7));
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
}
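
/* Added note (not in the original): the bitmap built above holds one bit per
 * byte of the target page (TARGET_PAGE_SIZE / 8 bytes in total) and marks the
 * byte ranges occupied by translated code.  It is only built once a page has
 * seen SMC_BITMAP_USE_THRESHOLD write-invalidations; after that,
 * tb_invalidate_phys_page_fast() can test e.g.
 *
 *     b = p->code_bitmap[offset >> 3] >> (offset & 7);
 *     if (b & ((1 << len) - 1))    // the write overlaps translated code
 *
 * instead of walking the whole TB list for every write to the page.
 */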
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
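
/* Added note (not in the original): when the translated code does not cross
 * a page boundary, phys_page2 stays -1 (tb_link_phys() below treats -1 as
 * "no second page").  A block whose last byte lands on the following page
 * gets both pages registered, so a write to either page invalidates it.
 */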
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
        }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             cpu_single_env->mem_io_vaddr, len,
             cpu_single_env->eip,
             cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    }
        tb_invalidate_phys_page_range(start, start + len, 1);
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
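
/* Added note (not in the original): tbs[] entries are allocated in the same
 * order as their generated code, so tc_ptr values increase monotonically
 * across the array.  That ordering is what lets tb_find_pc() use a plain
 * binary search over [0, nb_tbs) to map a host PC inside the code buffer
 * back to the TranslationBlock that owns it (used e.g. together with
 * cpu_restore_state() when a fault hits generated code).
 */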
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
}
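
/* Added usage sketch (not in the original): a debugger front end watching a
 * 4-byte variable could call, for example,
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_GDB, &wp);
 *
 * The TLB page for 'vaddr' is flushed so the next access takes the slow
 * path, where the watchpoint list is consulted (see the io_mem_watch
 * handling in tlb_set_page_exec() below).
 */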
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
{
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        kvm_update_guest_debug(env, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        static char logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_self(env)) {
    }
#endif

        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            }
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
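
/* Added usage sketch (not in the original): the string handed to
 * cpu_str_to_log_mask() is the -d command line argument, a comma separated
 * list of the names in cpu_log_items[], e.g.
 *
 *     loglevel = cpu_str_to_log_mask("in_asm,cpu");
 *     cpu_set_log(loglevel);
 *
 * "all" selects every item; an unknown name makes the function return 0, as
 * the comment above says.
 */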
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
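
/* Added note (not in the original): two hash-page ranges are cleared because
 * a TB is indexed in tb_jmp_cache by its starting PC, and a block that starts
 * on the page just before 'addr' can still run into the flushed page.
 * Clearing the bucket range for (addr - TARGET_PAGE_SIZE) as well as for
 * addr ensures no stale translation touching either page survives in the
 * per-CPU cache.
 */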
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i].addr_read = -1;
            env->tlb_table[mmu_idx][i].addr_write = -1;
            env->tlb_table[mmu_idx][i].addr_code = -1;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
{
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
            iotlb += p->region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
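
/* Added note (not in the original): te->addend is what the softmmu fast path
 * adds to a guest virtual address once the TLB entry matches, i.e. roughly
 *
 *     host_ptr = (void *)(vaddr + te->addend);
 *
 * because addend was computed above as (host page address - vaddr).  The
 * TLB_MMIO / TLB_NOTDIRTY bits stored in the addr_* fields push those
 * accesses off the fast path and through the I/O or dirty-tracking handlers
 * instead.
 */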
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
                        int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    int i, j, prot, prot1;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                rc = (*fn)(priv, start, end, prot);
                /* callback can stop iteration by returning != 0 */
            }
        }
    }
}
static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    p = page_find(address >> TARGET_PAGE_BITS);
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space */
        /* if the write protection is set, then we invalidate the code */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);

    host_end = host_start + qemu_host_page_size;

    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
        }
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr)                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2347 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2349 ram_addr_t phys_offset
,
2350 ram_addr_t region_offset
)
2352 target_phys_addr_t addr
, end_addr
;
2355 ram_addr_t orig_size
= size
;
2359 /* XXX: should not depend on cpu context */
2361 if (env
->kqemu_enabled
) {
2362 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2366 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2368 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2369 region_offset
= start_addr
;
2371 region_offset
&= TARGET_PAGE_MASK
;
2372 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2373 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2374 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
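
/* Illustrative sketch (not part of the original file): how a caller such as a
   board model typically backs a region with RAM using the helpers in this
   file.  The guest-physical base address and the 1 MB size are made-up values;
   cpu_register_physical_memory() is assumed to be the region_offset == 0
   convenience wrapper from cpu-common.h. */
#if 0
static void example_register_ram(void)
{
    ram_addr_t ram_off;

    /* Allocate 1 MB of host-backed guest RAM ... */
    ram_off = qemu_ram_alloc(0x100000);
    /* ... and map it at a made-up guest-physical address.  IO_MEM_RAM in the
       low bits marks the pages as plain RAM for the softmmu fast path. */
    cpu_register_physical_memory(0x10000000, 0x100000, ram_off | IO_MEM_RAM);
}
#endif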

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64
                ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;
    extern int mem_prealloc;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother checking for errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    ftruncate(fd, memory);

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("alloc_mem_area: can't mmap hugetlbfs pages");
        close(fd);
        return NULL;
    }
    return area;
}

#else

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    return NULL;
}

#endif
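
/* Illustrative note (assumption, not part of the original file): the hugetlbfs
   path above is what ultimately services the -mem-path command line option,
   so hugepage-backed guest RAM can be requested roughly like this (the mount
   point is a made-up path):

       qemu -m 1024 -mem-path /hugepages ...

   The equivalent direct call, as a sketch: */
#if 0
static void *example_hugepage_backing(void)
{
    /* Back 256 MB of guest RAM with files created under a hugetlbfs mount
       point (made-up path). */
    return file_ram_alloc(256 * 1024 * 1024, "/hugepages");
}
#endif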

extern const char *mem_path;

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = file_ram_alloc(size, mem_path);
    if (!new_block->host) {
        new_block->host = qemu_vmalloc(size);
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
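
/* Illustrative sketch (not part of the original file): the intended use of
   qemu_get_ram_ptr() per the comment above -- a device touching RAM it
   allocated itself, here a made-up 64 kB video RAM block. */
#if 0
static void example_vram_fill(void)
{
    ram_addr_t vram_off = qemu_ram_alloc(0x10000);   /* device-owned VRAM */
    uint8_t *vram = qemu_get_ram_ptr(vram_off);      /* host pointer, valid only
                                                        inside this block */
    memset(vram, 0, 0x10000);
    /* For guest-visible DMA, cpu_physical_memory_rw()/..._map() should be used
       instead, as the comment above says. */
}
#endif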

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
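
/* Illustrative sketch (assumption, not part of the original file):
   qemu_ram_addr_from_host() is the inverse of qemu_get_ram_ptr() for any
   address inside a registered RAM block. */
#if 0
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    /* Round-trips back to the same ram offset (assert.h assumed included). */
    assert(qemu_ram_addr_from_host(host) == addr);
}
#endif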

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
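
/* Illustrative sketch (not part of the original file): the invariant the
   notdirty handlers above maintain.  Each RAM page has one byte in
   phys_ram_dirty; while CODE_DIRTY_FLAG is clear, translated code may still
   exist for the page, so writes must keep going through the slow notdirty
   path until that code has been invalidated. */
#if 0
static int example_page_needs_notdirty(ram_addr_t ram_addr)
{
    /* 0xff means "fully dirty": no translated code left to invalidate and no
       other consumer (VGA, migration, ...) still wants write tracking, so the
       TLB entry can be switched back to a plain RAM mapping. */
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff;
}
#endif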

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
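
/* Illustrative sketch (assumption: the cpu_watchpoint_insert() helper defined
   earlier in this file keeps its usual signature): how a debugger front end
   would arm a write watchpoint that the watch_mem_* handlers above then catch
   via the TLB trick described in the comment. */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* Watch 4 bytes for writes; BP_GDB marks the watchpoint as owned by the
       gdb stub rather than by the CPU model. */
    cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif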

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc **mem_read,
                                        CPUWriteMemoryFunc **mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
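
/* Illustrative sketch (not part of the original file): the usual device-side
   pairing of cpu_register_io_memory() with cpu_register_physical_memory().
   All names, the base address and the region size are made up; only the long
   (index 2) callbacks are provided, leaving the byte/word slots NULL as the
   comment above allows. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* device register read */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_map_mydev(void *dev_state)
{
    int iomemtype = cpu_register_io_memory(mydev_read, mydev_write, dev_state);

    /* made-up base address; one page of MMIO backed by the handlers above */
    cpu_register_physical_memory(0x10002000, 0x1000, iomemtype);
}
#endif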

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
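
/* Illustrative sketch (not part of the original file): the read-DMA pattern
   the two functions above are designed for -- map, copy, unmap -- with a
   fallback to the map-client mechanism when the single bounce buffer is busy.
   All names are made up. */
#if 0
static void example_dma_read(target_phys_addr_t dma_addr,
                             target_phys_addr_t dma_len,
                             uint8_t *dest)
{
    target_phys_addr_t plen = dma_len;
    void *buf = cpu_physical_memory_map(dma_addr, &plen, 0 /* read */);

    if (!buf) {
        /* Bounce buffer busy: register a callback with
           cpu_register_map_client() and retry from there. */
        return;
    }
    memcpy(dest, buf, plen);            /* plen may be less than dma_len */
    cpu_physical_memory_unmap(buf, plen, 0 /* read */, plen);
}
#endif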

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;

    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;

    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
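
/* Illustrative sketch (not part of the original file): how target code
   typically uses the ld*_phys/st*_phys helpers above, here to update a
   made-up 32-bit page-table entry at a guest-physical address. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);   /* read the PTE */

    pte |= 0x1;                          /* set a made-up "accessed" bit */
    /* stl_phys_notdirty() would skip dirty tracking, as its comment explains;
       stl_phys() is the general case. */
    stl_phys(pte_addr, pte);
}
#endif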

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
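
/* Illustrative sketch (not part of the original file): a gdb-stub style helper
   reading guest virtual memory through cpu_memory_rw_debug(), which translates
   page by page via cpu_get_phys_page_debug(). */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *dest, int len)
{
    /* Returns 0 on success, -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, dest, len, 0 /* read */);
}
#endif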

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
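
/* Illustrative sketch (not part of the original file): dump_exec_info() can be
   driven by any FILE* plus an fprintf-shaped callback, e.g. from a monitor
   command handler.  fprintf() itself already matches the expected
   int (*)(FILE *, const char *, ...) signature. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}
#endif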

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif