/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"

#if !defined(TARGET_IA64)

#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
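/* Bookkeeping for translated code: 'tbs' is the pool of TranslationBlock
   descriptors backing the code cache, and 'tb_phys_hash' hashes physical PCs
   to already-translated blocks so they can be reused instead of retranslated. */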
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
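/* map_exec() marks a host memory range as executable: the Win32 variant uses
   VirtualProtect(), while the POSIX variant rounds the range out to host page
   boundaries and calls mprotect() with PROT_READ|PROT_WRITE|PROT_EXEC. */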
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
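/* page_init() queries the host page size, derives the host page mask and bit
   count used by the page bookkeeping below, and allocates the first level of
   the physical page map. */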
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
        long long startaddr, endaddr;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
            n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                startaddr = MIN(startaddr,
                                (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                endaddr = MIN(endaddr,
                              (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                page_set_flags(startaddr & TARGET_PAGE_MASK,
                               TARGET_PAGE_ALIGN(endaddr),
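/* page_l1_map() returns the first-level map slot covering 'index'; indexes
   outside the guest address space are rejected by the check below. */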
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];
static inline PageDesc *page_find_alloc(target_ulong index)
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(target_ulong index)
    lp = page_l1_map(index);

    return p + (index & (L2_SIZE - 1));
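/* Look up the PhysPageDesc for a physical page index in the two-level
   l1_phys_map table, allocating intermediate levels when 'alloc' is set. */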
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));

    /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));

    /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        code_gen_buffer = qemu_malloc(code_gen_buffer_size);
        if (!code_gen_buffer) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
void cpu_exec_init(CPUState *env)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
static void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

        *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
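/* tb_jmp_remove() unlinks jump entry 'n' of 'tb' from the circular jmp_next
   list so that no other TB keeps chaining into it. */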
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
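/* set_bits() sets 'len' bits starting at bit 'start' in the bitmap 'tab';
   build_page_bitmap() uses it to record which bytes of a page hold
   translated code. */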
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
    start = (start + 8) & ~7;
    while (start < end1) {
    mask = ~(0xff << (end & 7));
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
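/* tb_gen_code() translates one block starting at 'pc', emits it into the code
   generation buffer and links it into the physical page tables (a block may
   span two pages). */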
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
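/* Invalidate a single TLB entry when any of its read/write/code addresses
   matches 'addr'. */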
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
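/* Clear the given dirty flags for the physical range [start, end) and push the
   matching TLB write entries back onto the slow path so the dirty bits are set
   again on the next store. */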
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
int cpu_physical_memory_set_dirty_tracking(int enable)
    r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
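/* Re-apply TLB_NOTDIRTY to write entries whose backing RAM page is no longer
   marked dirty, so that stores to those pages trap again. */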
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
            iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
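/* Without a softmmu TLB these hooks have nothing to flush or map. */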
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space. */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    if (start + len < start)
        /* we've wrapped around */
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        /* if the page was really writable, then we change its
           protection back to writable */
        if (prot & PAGE_WRITE_ORG) {
            pindex = (address - host_start) >> TARGET_PAGE_BITS;
            if (!(p1[pindex].flags & PAGE_WRITE)) {
                mprotect((void *)g2h(host_start), qemu_host_page_size,
                         (prot & PAGE_BITS) | PAGE_WRITE);
                p1[pindex].flags |= PAGE_WRITE;
                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
                tb_invalidate_check(address);

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
            if (addr > start_addr)                                      \
                start_addr2 = start_addr & ~TARGET_PAGE_MASK;           \
                if (start_addr2 > 0)                                    \
            if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)    \
                end_addr2 = TARGET_PAGE_SIZE - 1;                       \
                end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
                if (end_addr2 < TARGET_PAGE_SIZE - 1)                   \
2291 /* register physical memory. 'size' must be a multiple of the target
2292 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2293 io memory page. The address used when calling the IO function is
2294 the offset from the start of the region, plus region_offset. Both
2295 start_region and regon_offset are rounded down to a page boundary
2296 before calculating this offset. This should not be a problem unless
2297 the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
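
/* Illustrative usage sketch (not compiled): how a hypothetical board setup
   might call the registration function above.  Addresses, sizes and the
   'ram_offset' parameter are made up; ram_offset would normally come from
   qemu_ram_alloc(), and page-multiple sizes are assumed. */
#if 0
static void example_map_board_memory(ram_addr_t ram_offset)
{
    /* plain RAM: no IO bits set in phys_offset (IO_MEM_RAM) */
    cpu_register_physical_memory_offset(0x00000000, 0x02000000,
                                        ram_offset | IO_MEM_RAM, 0);
    /* punch an unassigned hole on top of part of it */
    cpu_register_physical_memory_offset(0x01000000, 0x00100000,
                                        IO_MEM_UNASSIGNED, 0);
}
#endif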
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
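
/* Illustrative sketch (not compiled): qemu_ram_alloc() above is a simple
   bump allocator over the preallocated phys_ram_base area.  The value it
   returns is a ram_addr_t offset into that area, not a host pointer; the
   size used here is arbitrary. */
#if 0
static void example_ram_block(void)
{
    ram_addr_t offset = qemu_ram_alloc(0x100000);
    uint8_t *host = phys_ram_base + offset;   /* host view of the block */

    memset(host, 0, 0x100000);
}
#endif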
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
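
/* Note on the notdirty handlers above (summary added for clarity, roughly):
   each RAM page has one byte of flags in phys_ram_dirty.  Pages whose byte
   is not 0xff are mapped through IO_MEM_NOTDIRTY, so the first write lands
   here, any translated code derived from the page can be invalidated
   (CODE_DIRTY_FLAG), and the remaining dirty bits (VGA/migration tracking)
   get set.  Once the byte reaches 0xff, tlb_set_dirty() switches the TLB
   entry back to plain RAM so later writes take the fast path. */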
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
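
/* Illustrative sketch (not compiled; guest address made up, 4 KiB target
   pages assumed): registering an MMIO bank whose base is not page aligned.
   CHECK_SUBPAGE() flags the partial pages, so the registration path builds
   subpage containers via subpage_init()/subpage_register() for them, while
   fully covered pages keep a plain per-page phys_offset.  'io_bank' is
   assumed to be a value returned by cpu_register_io_memory(). */
#if 0
static void example_unaligned_bank(int io_bank)
{
    cpu_register_physical_memory_offset(0x10000100, 0x1000, io_bank, 0);
}
#endif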
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
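
/* Illustrative sketch (not compiled): a hypothetical device exposing only
   32-bit registers.  Passing io_index 0 allocates a fresh slot; because the
   byte and word handlers are NULL, the returned value is tagged with
   IO_MEM_SUBWIDTH and accesses are routed through the subpage machinery.
   The base address is made up. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device would decode 'addr' here */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_register_mydev(void *dev_state)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory_offset(0x20000000, TARGET_PAGE_SIZE, io, 0);
}
#endif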
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
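
/* Illustrative sketch (not compiled): a device model DMA-ing a small,
   made-up descriptor out of guest memory with the slow-path accessors,
   patching one byte and writing it back. */
#if 0
static void example_patch_descriptor(target_phys_addr_t desc_gpa)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
    desc[0] |= 0x80;                       /* e.g. hand the buffer back */
    cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
}
#endif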
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
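
/* Illustrative sketch (not compiled): a device whose zero-copy DMA attempt
   failed because the single bounce buffer was busy can register a callback
   to be retried when cpu_physical_memory_unmap() releases it.  Names here
   are made up. */
#if 0
static void mydev_retry_dma(void *opaque)
{
    /* re-issue the deferred cpu_physical_memory_map() request here */
}

static void example_defer_dma(void *dev_state)
{
    void *client = cpu_register_map_client(dev_state, mydev_retry_dma);

    /* keep 'client' so the request can be dropped again with
       cpu_unregister_map_client(client), e.g. on device reset */
    (void)client;
}
#endif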
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
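
/* Illustrative sketch (not compiled): mapping a guest-physical buffer for
   reading.  The map may be shortened or silently bounce-buffered, so the
   returned length must be honoured and the unmap told how much was used. */
#if 0
static void example_map_and_copy(target_phys_addr_t gpa, uint8_t *dst,
                                 target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 0 /* is_write */);

    if (!host)
        return;                 /* resources exhausted, retry via map client */
    memcpy(dst, host, plen);
    cpu_physical_memory_unmap(host, plen, 0, plen);
}
#endif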
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define likely(x)       x
#define unlikely(x)     x
#endif
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
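
/* Illustrative sketch (not compiled): a gdb-stub style helper that peeks a
   32-bit value at a guest *virtual* address of the given CPU; the bytes come
   back in guest memory order.  The name is made up. */
#if 0
static int example_peek_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}
#endif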
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
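
/* Illustrative sketch (not compiled): the monitor normally passes its own
   fprintf-like callback, but for ad-hoc debugging the statistics above can
   simply be sent to stderr. */
#if 0
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif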
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif